| aristo-leaderboard-master | eqasc/code/allennlp_reasoning_explainqa/common/__init__.py |
#!/usr/bin/env python3
# % cat testfiles-5/predictions.tsv | sort | python3 explainer.py
# In paragraph 4, sentence 2, the participant "plants" is moved from an unknown location to sediment
# In paragraph 4, sentence 3, the participant "bacteria" is moved from an unknown location to sediment
# In paragraph 4, sentence 8, the participant "plants" is moved from sediment to one mile underground
# In paragraph 4, sentence 8, the participant "sediment" is moved from an unknown location to underground
# In paragraph 4, sentence 10, the participant "bacteria" is destroyed at "sediment"
# In paragraph 4, sentence 10, the participant "oil" is created at "underground"
# In paragraph 4, sentence 10, the participant "plants" is destroyed at "one mile underground"
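# Note on the input format (inferred from the parsing below, not from separate
# documentation): each line must carry six tab-separated fields,
#   paragraph_id, sentence_number, participant, action, location_before, location_after
# where action is one of CREATE, DESTROY or MOVE and "?" marks an unknown location.
# A hypothetical input line would be:
#   4 <tab> 8 <tab> plants <tab> MOVE <tab> sediment <tab> one mile underground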
import sys
explanations = []
for line in sys.stdin:
line = line.strip()
paragraph_id, sentence, participant, action, location_before, location_after = line.split("\t")
    event = ""
    if action == "DESTROY":
        if location_before == "?":
            location_before = "an unknown location"
        event = f'destroyed at "{location_before}"'
    elif action == "CREATE":
        if location_after == "?":
            location_after = "an unknown location"
        event = f'created at "{location_after}"'
    elif action == "MOVE":
        if location_before == "?":
            location_before = "an unknown location"
        if location_after == "?":
            location_after = "an unknown location"
        event = f"moved from {location_before} to {location_after}"
    if event:
        explanation = f'In paragraph {paragraph_id}, sentence {sentence}, the participant "{participant}" is {event}'
        explanations.append((int(paragraph_id), int(sentence), explanation))
for _, _, explanation in sorted(explanations):
print(explanation)
| aristo-leaderboard-master | propara/evaluator/explainer.py |
| aristo-leaderboard-master | propara/evaluator/__init__.py |
#!/usr/bin/env python3
import argparse
import json
from typing import Dict
from evaluation import Evaluation
from process import sentences_from_sentences_file, ActionFile
from scoring import QuestionScores
from errors import corrupted_action_file, corrupted_sentences_file
def main(answers_file: str, predictions_file: str, output_file: str, diagnostics_file: str, sentences_file: str):
# create diagnostics file if needed
diagnostics = None
sentences = None
if diagnostics_file:
diagnostics = open(diagnostics_file, mode='w')
print(f"Writing diagnostics to file {diagnostics_file}")
diagnostics.write(
f"Diagnostics of evaluation of predictions in {predictions_file} against answers in {answers_file}\n")
diagnostics.write("\n")
if sentences_file:
sentences = sentences_from_sentences_file(sentences_file)
    # Steps 1 and 2. Read and summarize answers and predictions
predictions = ActionFile.from_file(predictions_file)
answers = ActionFile.from_file(answers_file)
# Abort if there are differences
diff_report = answers.diff_participants(predictions)
if diff_report:
print(f"Participants in predictions file {predictions_file} are not exact matches to participants")
print(f"in {answers_file}. Detailed report:")
print()
print("\n".join(diff_report))
print()
corrupted_action_file(
filename=predictions_file,
details=f"Some participants are missing or unexpected."
)
predictions_summary = predictions.summarize()
answers_summary = answers.summarize()
# Step 3. Calculate per-process scores
scores_by_process = dict() # type: Dict[int, QuestionScores]
for process_id, answer_summary in answers_summary.items():
if process_id not in predictions_summary:
corrupted_action_file(
filename=predictions_file,
details=f"Prediction for process_id {answer_summary.process_id} is missing."
)
prediction_summary = predictions_summary[process_id]
score = QuestionScores.from_summaries(answer_summary, prediction_summary)
scores_by_process[process_id] = score
if diagnostics:
diag_struct = {
"prediction_summary": prediction_summary.diagnostics(),
"answer_summary": answer_summary.diagnostics(),
"score": {
"process_id": process_id,
"inputs": score.inputs.diagnostics(),
"outputs": score.outputs.diagnostics(),
"conversions": score.conversions.diagnostics(),
"moves": score.moves.diagnostics(),
}
}
if sentences:
if process_id not in sentences:
corrupted_sentences_file(
filename=sentences_file,
details=f"Sentences for process {process_id} not found."
)
sentences_for_diag = []
for i, text in enumerate(sentences[process_id]):
sentences_for_diag.append({
"step_number": 1 + i,
"text": text,
})
diag_struct["sentences"] = sentences_for_diag # type: ignore
diagnostics.write(json.dumps(diag_struct, indent=4))
diagnostics.write("\n")
# Step 4. Calculate a final evaluation
evaluation = Evaluation(scores_by_process)
# Step 5. Print a report and generate output file
report(evaluation, len(predictions_summary), len(answers_summary))
overall_scores = {
"precision": round(evaluation.overall.precision, 3),
"recall": round(evaluation.overall.recall, 3),
"f1": round(evaluation.overall.F1(), 3)
}
if output_file:
print("Writing results to file: %s" % output_file)
with open(output_file, "wt", encoding="UTF-8") as output:
output.write(json.dumps(overall_scores))
if diagnostics:
diag_struct = {"overall_scores": overall_scores}
diagnostics.write(json.dumps(diag_struct, indent=4))
diagnostics.write("\n")
# close diagnostics file
if diagnostics:
diagnostics.close()
def report(e: Evaluation, num_predictions: int, num_answers: int):
i = e.inputs
o = e.outputs
c = e.conversions
m = e.moves
overall = e.overall
print("=================================================")
print("Question Avg. Precision Avg. Recall Avg. F1")
print("-------------------------------------------------")
print("Inputs %4.3f %4.3f %4.3f" % (i.precision, i.recall, i.F1()))
print("Outputs %4.3f %4.3f %4.3f" % (o.precision, o.recall, o.F1()))
print("Conversions %4.3f %4.3f %4.3f" % (c.precision, c.recall, c.F1()))
print("Moves %4.3f %4.3f %4.3f" % (m.precision, m.recall, m.F1()))
print("-------------------------------------------------")
print("Overall Precision %4.3f " % overall.precision)
print("Overall Recall %4.3f " % overall.recall)
print("Overall F1 %4.3f " % overall.F1())
print("=================================================")
print()
print(f"Evaluated {num_predictions} predictions against {num_answers} answers.")
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluator for ProPara Leaderboard')
parser.add_argument('--predictions', '-p',
action='store',
dest='predictions_file',
required=True,
help='Path to file with predictions')
parser.add_argument('--answers', '-a',
action='store',
dest='answers_file',
required=True,
help='Path to file with answers')
parser.add_argument('--output', '-o',
action='store',
dest='output_file',
help='Output results to this file.')
parser.add_argument('--diagnostics', '-d',
action='store',
dest='diagnostics_file',
help='Write diagnostics to this file.')
parser.add_argument('--sentences', '-s',
action='store',
dest='sentences_file',
help='Path to file with sentences.')
args = parser.parse_args()
main(args.answers_file, args.predictions_file, args.output_file, args.diagnostics_file, args.sentences_file)
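# Example invocation (hypothetical file names, derived from the argparse setup
# above): evaluate predictions against answers and write overall scores to JSON:
#
#   python3 evaluator.py -p predictions.tsv -a answers.tsv -o metrics.json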
| aristo-leaderboard-master | propara/evaluator/evaluator.py |
from typing import List, NamedTuple, Callable, TypeVar, Optional
from evaluation.metric import Metric
from text import terms
from process import ProcessSummary, Conversion, Move, Input, Output
# Question types used in functions here
QType = TypeVar("QType", Input, Output, Conversion, Move)
class QuestionScores(NamedTuple):
inputs: Metric
outputs: Metric
conversions: Metric
moves: Metric
@classmethod
def from_summaries(cls, answer: ProcessSummary, prediction: ProcessSummary):
return cls(
inputs=_score_inputs(answer.inputs, prediction.inputs),
outputs=_score_outputs(answer.outputs, prediction.outputs),
conversions=_score_conversions(answer.conversions, prediction.conversions),
moves=_score_moves(answer.moves, prediction.moves),
)
def _edgecases(answers: List[QType], predictions: List[QType]) -> Optional[Metric]:
if len(answers) == 0 and len(predictions) == 0:
return Metric(precision=1.0, recall=1.0)
if len(answers) == 0:
return Metric(precision=0.0, recall=1.0)
if len(predictions) == 0:
return Metric(precision=1.0, recall=0.0)
return None
def _score_inputs(answers: List[Input], predictions: List[Input]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_input_pair)
def _score_input_pair(answer: Input, prediction: Input) -> float:
return _compare_participants(answer.participants, prediction.participants)
def _score_outputs(answers: List[Output], predictions: List[Output]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_output_pair)
def _score_output_pair(answer: Output, prediction: Output) -> float:
return _compare_participants(answer.participants, prediction.participants)
def _score_conversions(answers: List[Conversion], predictions: List[Conversion]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_conversion_pair)
def _score_conversion_pair(answer: Conversion, prediction: Conversion) -> float:
if answer.step_id != prediction.step_id:
return 0.0
return sum((_compare_locations(answer.locations, prediction.locations),
_compare_participants(answer.destroyed, prediction.destroyed),
_compare_participants(answer.created, prediction.created))) / 3
def _score_moves(answers: List[Move], predictions: List[Move]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_move_pair)
def _score_move_pair(answer: Move, prediction: Move) -> float:
if answer.step_id != prediction.step_id:
return 0.0
return sum((_compare_participants(answer.participants, prediction.participants),
_compare_locations(answer.location_before, prediction.location_before),
_compare_locations(answer.location_after, prediction.location_after))) / 3
def _compare_participants(answer: str, prediction: str) -> float:
# Trivial match
if answer == prediction:
return 1.0
prediction_terms = terms.extract_termsets(prediction)
answer_terms = terms.extract_termsets(answer)
# calculate Jaccard similarity score
numerator = terms.terms_overlap(prediction_terms, answer_terms)
denominator = len(prediction_terms) + len(answer_terms) - numerator
return numerator / denominator
def _compare_locations(answer: str, prediction: str) -> float:
if answer == prediction:
return 1.0
prediction_terms = terms.extract_termsets_with_normalization(prediction)
answer_terms = terms.extract_termsets_with_normalization(answer)
# calculate Jaccard similarity score
numerator = terms.terms_overlap(prediction_terms, answer_terms)
denominator = len(prediction_terms) + len(answer_terms) - numerator
return numerator / denominator
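# Worked example for the two comparison functions above (illustrative, matching
# test_scoring.py): answer "dew AND sun" vs. prediction "dew" yields the termsets
# [{'dew'}, {'sun'}] and [{'dew'}]; the overlap is 1, so the Jaccard score is
# 1 / (1 + 2 - 1) = 0.5.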
# Score a pair of QType answers and predictions, such that:
#
# precision = precision_numerator / len(predictions)
# recall = recall_numerator / len(answers)
#
# The calculation of precision and recall numerators depends on the number of answers and predictions. In these
# examples, a1 and a2 are answers and p1, p2 and p3 are predictions. Combinations (like a2p3) indicate a score for the
# answer-prediction pair (like a2 and p3).
#
# Example 1: answers = [a1,a2] predictions = [p1]
# precision_numerator = max(a1p1, a2p1)
# recall_numerator = max(a1p1) + max(a2p1)
#
# Example 2: answers = [a1,a2] predictions = [p1,p2]
# precision_numerator = max(a1p1, a2p1) + max(a1p2, a2p2)
# recall_numerator = max(a1p1, a2p1) + max(a1p2, a2p2)
#
# Example 3: answers = [a1,a2] predictions = [p1,p2,p3]
# precision_numerator = max(a1p1, a2p1) + max(a1p2, a2p2) + max(a1p3, a2p3)
# recall_numerator = max(a1p1, a1p2, a1p3) + max(a2p1, a2p2, a2p3)
def _score(answers: List[QType], predictions: List[QType], scoring_function: Callable[[QType, QType], float]) -> Metric:
precision_numerator = 0.0
for p in predictions:
max_score = 0.0
for a in answers:
max_score = max(max_score, scoring_function(a, p))
precision_numerator += max_score
    # the recall numerator equals the precision numerator when the number of
    # predictions matches the number of expected answers; otherwise recompute it
recall_numerator = precision_numerator
if len(predictions) != len(answers):
recall_numerator = 0.0
for a in answers:
max_score = 0.0
for p in predictions:
max_score = max(max_score, scoring_function(a, p))
recall_numerator += max_score
if precision_numerator == 0.0:
precision = 0.0
else:
precision = precision_numerator / (1.0 * len(predictions))
if recall_numerator == 0.0:
recall = 0.0
else:
recall = recall_numerator / (1.0 * len(answers))
return Metric(precision=precision, recall=recall)
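# Illustration of the recall shortcut above (not part of the original module):
# with answers [a1, a2] and predictions [p1], suppose the scoring function gives
# a1p1 = 0.4 and a2p1 = 1.0. Then precision_numerator = max(0.4, 1.0) = 1.0, and
# because len(predictions) != len(answers) the recall numerator is recomputed as
# max(a1p1) + max(a2p1) = 0.4 + 1.0 = 1.4, giving precision 1.0/1 and recall 1.4/2.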
| aristo-leaderboard-master | propara/evaluator/scoring/question.py |
from scoring.question import QuestionScores
| aristo-leaderboard-master | propara/evaluator/scoring/__init__.py |
import unittest
from process import ProcessSummary, Conversion, Move, Input, Output
from scoring import question, QuestionScores
class TestScoring(unittest.TestCase):
def test_compare_locations(self):
self.assertEquals(question._compare_locations('', ''), 1.0)
self.assertEquals(question._compare_locations('', '-'), 0.0)
self.assertEquals(question._compare_locations('?', '?'), 1.0)
self.assertEquals(question._compare_locations('plant OR leaf', 'leaf'), 1.0)
self.assertEquals(question._compare_locations('', 'leaf'), 0.0)
self.assertEquals(question._compare_locations('-', 'leaf'), 0.0)
self.assertEquals(question._compare_locations('plant OR leaf', 'leaf'), 1.0)
self.assertEquals(question._compare_locations('dew OR rain', 'water OR dew'), 1.0)
self.assertEquals(question._compare_locations('dew', 'dew AND sun'), 0.5)
self.assertEquals(question._compare_locations('dew AND sun', 'dew'), 0.5)
self.assertEquals(question._compare_locations('dew AND sun', 'dew AND blah1 AND blah2'), 0.25)
self.assertEquals(question._compare_locations('dew AND rain', 'water OR dew'), 0.5)
self.assertEquals(question._compare_locations('water OR dew AND sun', 'dew OR rain'), 0.5)
def test_score_tuple_question(self):
answers = [
Move(participants='plant OR leaf', location_before='root', location_after='earth', step_id='event2'),
Move(participants='leaf', location_before='soil', location_after='mud', step_id='event2'),
]
predictions = [
Move(participants='plants OR leaf', location_before='root', location_after='earth', step_id='event2'),
Move(participants='plant', location_before='mud OR plant', location_after='room OR earth', step_id='event2')
]
predictions_longer = [
Move(participants='plants OR leaf', location_before='root', location_after='earth', step_id='event2'),
Move(participants='plant', location_before='mud OR plant', location_after='room OR earth',
step_id='event2'),
Move(participants='tree', location_before='monkey', location_after='earth', step_id='event2'),
]
predictions_shorter = [
Move(participants='plants OR leaf', location_before='root', location_after='earth', step_id='event2'),
]
self.assertEquals(question._score_moves(answers, predictions).F1(), 0.8333333333333333)
self.assertEquals(question._score_moves(answers, predictions_shorter).F1(), 0.8)
self.assertEquals(question._score_moves(answers, predictions_longer).F1(), 0.6666666666666666)
self.assertEquals(question._score_moves(answers, []).F1(), 0.0)
self.assertEquals(question._score_moves([], predictions).F1(), 0.0)
self.assertEquals(question._score_moves([], []).F1(), 1.0)
def test_score_conversion_pair(self):
self.assertEquals(question._score_conversion_pair(
Conversion(destroyed='animal OR monkey', created='tree', locations='branch', step_id='event1'),
Conversion(destroyed='animal', created='tree', locations='branch', step_id='event1')
), 1.0)
self.assertEquals(question._score_conversion_pair(
Conversion(destroyed='plant OR leaf', created='root', locations='earth', step_id='event2'),
Conversion(destroyed='leaf', created='root OR plant', locations='soil OR earth', step_id='event2'),
), 1.0)
# plants should match plant.
self.assertEquals(question._score_conversion_pair(
Conversion(destroyed='plants OR leaf', created='root', locations='earth', step_id='event2'),
Conversion(destroyed='leaf', created='root OR plant', locations='soil OR earth', step_id='event2'),
), 1.0)
# identical conversion, but mismatching step_ids
self.assertEquals(question._score_conversion_pair(
Conversion(destroyed='foo', created='bar', locations='baz', step_id='eventX'),
Conversion(destroyed='foo', created='bar', locations='baz', step_id='eventY'),
), 0.0)
def test_score_input_pair(self):
self.assertEquals(question._score_input_pair(
Input(participants=''), Input(participants='-')
), 0)
self.assertEquals(question._score_input_pair(
Input(participants='plant OR leaf'), Input(participants='leaf')
), 1)
self.assertEquals(question._score_input_pair(
Input(participants='?'), Input(participants='?')
), 1)
def test_calculate(self):
score = QuestionScores.from_summaries(
answer=ProcessSummary(
1,
inputs=[Input(participants='plant')],
outputs=[Output(participants='plant OR leaf'), Output(participants='soil')],
conversions=[],
moves=[
Move(participants="plant OR leaf",
location_before="root",
location_after="event2",
step_id="event2")
]
),
prediction=ProcessSummary(
1,
inputs=[Input(participants='tree')],
outputs=[Output(participants='leaf'), Output(participants='mud')],
conversions=[Conversion(destroyed='tree', created='root', locations='soil', step_id='event1')],
moves=[Move(participants='plant', location_before='root', location_after='soil', step_id='event2')]
),
)
self.assertEquals(score.conversions.F1(), 0.0)
score = QuestionScores.from_summaries(
answer=ProcessSummary(
1,
inputs=[Input(participants='monkey'), Input(participants='ape')],
outputs=[Output(participants='langur OR langer')],
conversions=[
Conversion(destroyed='animal OR monkey', created='tree', locations='branch', step_id='event1')],
moves=[],
),
prediction=ProcessSummary(
1,
inputs=[Input(participants='langur'), Input(participants='ape')],
outputs=[Output(participants='monkey')],
conversions=[Conversion(destroyed='animal', created='tree', locations='branch', step_id='event1')],
moves=[],
),
)
self.assertEquals(score.conversions.F1(), 1.0)
def test_score_empty_answers(self):
score = QuestionScores.from_summaries(
answer=ProcessSummary(process_id=1, inputs=[], outputs=[], conversions=[], moves=[]),
prediction=ProcessSummary(process_id=1, inputs=[], outputs=[], conversions=[], moves=[])
)
self.assertEquals(score.inputs.F1(), 1.0)
self.assertEquals(score.outputs.F1(), 1.0)
self.assertEquals(score.conversions.F1(), 1.0)
def test_score(self):
i1 = Input(participants='xxx')
i2 = Input(participants='yyy')
i3 = Input(participants='zzz')
answers = [i1]
predictions = [i2, i3]
def scoring_function(answer: Input, prediction: Input) -> float:
return 1.0
score = question._score(answers, predictions, scoring_function)
self.assertEqual(score.precision, 1.0)
self.assertEqual(score.recall, 1.0)
self.assertEqual(score.F1(), 1.0)
def test_score2(self):
i1 = Input(participants='xxx')
i2 = Input(participants='yyy')
i3 = Input(participants='zzz')
answers = [i1]
predictions = [i2, i3]
def scoring_function(answer: Input, prediction: Input) -> float:
if (answer, prediction) == (i1, i2):
return 1.0
return 0.0
score = question._score(answers, predictions, scoring_function)
self.assertEqual(score.precision, 0.5)
self.assertEqual(score.recall, 1.0)
self.assertEqual(score.F1(), 2 / 3)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | propara/evaluator/scoring/test_scoring.py |
from typing import Dict, NamedTuple, Iterable
from evaluation.metric import Metric
class EvaluationAverages(NamedTuple):
inputs: float
outputs: float
conversions: float
moves: float
overall: float
class Evaluation:
def __init__(self, scores: Dict[int, "QuestionScores"]) -> None: # type: ignore
precision = Evaluation._precision(scores.values())
recall = Evaluation._recall(scores.values())
self.inputs = Metric(precision=precision.inputs, recall=recall.inputs)
self.outputs = Metric(precision=precision.outputs, recall=recall.outputs)
self.conversions = Metric(precision=precision.conversions, recall=recall.conversions)
self.moves = Metric(precision=precision.moves, recall=recall.moves)
self.overall = Metric(precision=precision.overall, recall=recall.overall)
@staticmethod
def _precision(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.precision
outputs += score.outputs.precision
conversions += score.conversions.precision
moves += score.moves.precision
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
@staticmethod
def _recall(scores: Iterable["QuestionScores"]) -> EvaluationAverages: # type: ignore
inputs = 0.0
outputs = 0.0
conversions = 0.0
moves = 0.0
num_processes = 0
for score in scores:
inputs += score.inputs.recall
outputs += score.outputs.recall
conversions += score.conversions.recall
moves += score.moves.recall
num_processes += 1
inputs_avg = round(inputs / num_processes, 3)
outputs_avg = round(outputs / num_processes, 3)
conversions_avg = round(conversions / num_processes, 3)
moves_avg = round(moves / num_processes, 3)
overall = (inputs_avg + outputs_avg + conversions_avg + moves_avg) / 4
return EvaluationAverages(
inputs=inputs_avg,
outputs=outputs_avg,
conversions=conversions_avg,
moves=moves_avg,
overall=overall,
)
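# Worked example (illustrative): with two processes whose inputs scores have
# precision 1.0 and 0.5, inputs_avg = round(1.5 / 2, 3) = 0.75. Both helpers are
# macro-averages over processes, and the overall figure is the plain mean of the
# four per-category averages.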
| aristo-leaderboard-master | propara/evaluator/evaluation/evaluation.py |
from evaluation.metric import Metric
from evaluation.evaluation import Evaluation
| aristo-leaderboard-master | propara/evaluator/evaluation/__init__.py |
from typing import Dict, NamedTuple
class Metric(NamedTuple):
precision: float
recall: float
def F1(self):
if self.precision + self.recall == 0:
return 0.0
return 2 * self.precision * self.recall / (self.precision + self.recall)
def diagnostics(self) -> Dict[str, float]:
return {
"precision": self.precision,
"recall": self.recall
}
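# Worked example (illustrative, matching test_score2 in scoring/test_scoring.py):
# Metric(precision=0.5, recall=1.0).F1() == 2 * 0.5 * 1.0 / (0.5 + 1.0) == 2/3,
# the harmonic mean; precision == recall == 0 short-circuits to 0.0 to avoid a
# division by zero.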
| aristo-leaderboard-master | propara/evaluator/evaluation/metric.py |
from typing import List, Set
from text.stemmer import PorterStemmer
# Extract term sets from a phrase containing " AND " and " OR " tokens. A phrase like "foo OR bar AND fnord OR gnarf"
# is turned into a list of term sets like [{"foo", "bar"}, {"fnord", "gnarf"}] to match to another phrase's term sets.
def extract_termsets(phrase: str) -> List[Set[str]]:
outer = [p.strip() for p in phrase.split(" AND ")]
inner = [set(item.split(" OR ")) for item in outer]
return inner
# Extract term sets from a phrase containing " AND " and " OR " tokens. A phrase like "foo OR bar AND fnord OR gnarf"
# is turned into a list of term sets like [{"foo", "bar"}, {"fnord", "gnarf"}] to match to another phrase's term sets.
#
# This function normalizes each word.
def extract_termsets_with_normalization(phrase: str) -> List[Set[str]]:
outer = [p.strip() for p in phrase.split(" AND ")]
inner = [set(_normalize_words(item.split(" OR "))) for item in outer]
return inner
def terms_overlap(phrase1_terms: List[Set[str]], phrase2_terms: List[Set[str]]) -> int:
num = 0
for t1 in phrase1_terms:
for t2 in phrase2_terms:
if t1.intersection(t2):
num += 1
return num
def _normalize_words(words: List[str]) -> List[str]:
stemmed = [] # type: List[str]
for w in words:
if not w or len(w.strip()) == 0:
return [""]
w_lower = w.lower()
# Remove leading articles from the phrase (e.g., the rays => rays).
articles = ["a", "an", "the", "your", "his", "their", "my", "another", "other", "this", "that"]
starting_article = next((article for article in articles if w_lower.startswith(_leading_word(article))), None)
if starting_article is not None:
w_lower = w_lower.replace(_leading_word(starting_article), "", 1)
# Porter stemmer: rays => ray
stemmed.append(PorterStemmer().stem(w_lower).strip())
return stemmed
def _leading_word(word):
return word + " "
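# Usage sketch (illustrative, mirroring how scoring/question.py consumes this
# module): "dew OR rain" and "water OR dew" each normalize to a single termset,
# [{'dew', 'rain'}] and [{'water', 'dew'}]. The termsets share 'dew', so
# terms_overlap(...) returns 1 and the resulting Jaccard score is 1 / (1 + 1 - 1) = 1.0.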
| aristo-leaderboard-master | propara/evaluator/text/terms.py |
| aristo-leaderboard-master | propara/evaluator/text/__init__.py |
import unittest
from text import terms
class TestTerms(unittest.TestCase):
def test_extract_termsets(self):
# one term
self.assertEqual(terms.extract_termsets("dew"), [{'dew'}])
# one term with a word that should not be stemmed
self.assertEqual(terms.extract_termsets("raining"), [{'raining'}])
def test_extract_termsets_with_normalization(self):
# one term
self.assertEqual(terms.extract_termsets_with_normalization("dew"), [{'dew'}])
# one term with a word that should be normalized
self.assertEqual(terms.extract_termsets_with_normalization("raining"), [{'rain'}])
# one term with two words, one that gets normalized
self.assertEqual(terms.extract_termsets_with_normalization("raining cats and dogs"), [{'raining cats and dog'}])
# ANDed terms
self.assertEqual(terms.extract_termsets_with_normalization("dew AND rain"), [{'dew'}, {'rain'}])
# ORed terms
self.assertEqual(terms.extract_termsets_with_normalization("dew OR rain"), [{'dew', 'rain'}])
# ORed and ANDed terms
self.assertEqual(terms.extract_termsets_with_normalization("dew OR rain AND sun"), [{'dew', 'rain'}, {'sun'}])
# more complex arrangements
self.assertEqual(
terms.extract_termsets_with_normalization("dew OR rain AND sun AND foo OR bar OR baz"),
[
{'dew', 'rain'},
{'sun'},
{'foo', 'bar', 'baz'}
]
)
# as above, but "droplet" and "droplets" in the phrase should become one term "droplet"
self.assertEqual(
terms.extract_termsets_with_normalization("dew OR droplet OR droplets AND sun AND foo OR bar OR baz"),
[
{'dew', 'droplet'},
{'sun'},
{'foo', 'bar', 'baz'}
]
)
def test_terms_overlap(self):
self.assertEqual(
terms.terms_overlap(
[{'foo'}],
[{'foo'}]
),
1
)
self.assertEqual(
terms.terms_overlap(
[{'foo'}],
[{'bar'}]
),
0
)
self.assertEqual(
terms.terms_overlap(
[{'diesel'}, {'energi'}],
[{'diesel'}, {'petrol'}]
),
1
)
self.assertEqual(
terms.terms_overlap(
[{'plant', 'anim'}],
[{'soft tissu'}]
),
0
)
self.assertEqual(
terms.terms_overlap(
[{'nitrogen'}],
[{'fixed nitrogen', 'usable nitrogen'}]
),
0
)
self.assertEqual(
terms.terms_overlap(
[{'rain'}, {'water', 'liquid'}],
[{'rain'}, {'water'}]
),
2
)
def test_normalization(self):
self.assertEqual(
terms._normalize_words(["the Raining", "DANCING", "experimenting"]),
["rain", "danc", "experi"]
)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | propara/evaluator/text/test_terms.py |
"""
This was copied from the NLTK source:
https://github.com/nltk/nltk/blob/7e06fcb2be41a7dbc23bf0b4f666aef7b915d402/nltk/stem/porter.py
It was modified slightly to run outside NLTK.
"""
"""
Porter Stemmer
This is the Porter stemming algorithm. It follows the algorithm
presented in
Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.
with some optional deviations that can be turned on or off with the
`mode` argument to the constructor.
Martin Porter, the algorithm's inventor, maintains a web page about the
algorithm at
http://www.tartarus.org/~martin/PorterStemmer/
which includes another Python implementation and other implementations
in many languages.
"""
__docformat__ = 'plaintext'
class PorterStemmer:
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. "An algorithm for suffix stripping."
Program 14.3 (1980): 130-137.
See http://www.tartarus.org/~martin/PorterStemmer/ for the homepage
of the algorithm.
Martin Porter has endorsed several modifications to the Porter
algorithm since writing his original paper, and those extensions are
included in the implementations on his website. Additionally, others
have proposed further improvements to the algorithm, including NLTK
contributors. There are thus three modes that can be selected by
passing the appropriate constant to the class constructor's `mode`
attribute:
PorterStemmer.ORIGINAL_ALGORITHM
- Implementation that is faithful to the original paper.
Note that Martin Porter has deprecated this version of the
algorithm. Martin distributes implementations of the Porter
Stemmer in many languages, hosted at:
http://www.tartarus.org/~martin/PorterStemmer/
and all of these implementations include his extensions. He
strongly recommends against using the original, published
version of the algorithm; only use this mode if you clearly
understand why you are choosing to do so.
PorterStemmer.MARTIN_EXTENSIONS
- Implementation that only uses the modifications to the
algorithm that are included in the implementations on Martin
Porter's website. He has declared Porter frozen, so the
behaviour of those implementations should never change.
PorterStemmer.NLTK_EXTENSIONS (default)
- Implementation that includes further improvements devised by
NLTK contributors or taken from other modified implementations
found on the web.
For the best stemming, you should use the default NLTK_EXTENSIONS
version. However, if you need to get the same results as either the
original algorithm or one of Martin Porter's hosted versions for
    compatibility with an existing implementation or dataset, you can use
one of the other modes instead.
"""
# Modes the Stemmer can be instantiated in
NLTK_EXTENSIONS = 'NLTK_EXTENSIONS'
MARTIN_EXTENSIONS = 'MARTIN_EXTENSIONS'
ORIGINAL_ALGORITHM = 'ORIGINAL_ALGORITHM'
def __init__(self, mode=NLTK_EXTENSIONS):
if mode not in (
self.NLTK_EXTENSIONS,
self.MARTIN_EXTENSIONS,
self.ORIGINAL_ALGORITHM
):
raise ValueError(
"Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
"PorterStemmer.MARTIN_EXTENSIONS, or "
"PorterStemmer.ORIGINAL_ALGORITHM"
)
self.mode = mode
if self.mode == self.NLTK_EXTENSIONS:
# This is a table of irregular forms. It is quite short,
# but still reflects the errors actually drawn to Martin
# Porter's attention over a 20 year period!
irregular_forms = {
"sky": ["sky", "skies"],
"die": ["dying"],
"lie": ["lying"],
"tie": ["tying"],
"news": ["news"],
"inning": ["innings", "inning"],
"outing": ["outings", "outing"],
"canning": ["cannings", "canning"],
"howe": ["howe"],
"proceed": ["proceed"],
"exceed": ["exceed"],
"succeed": ["succeed"],
}
self.pool = {}
for key in irregular_forms:
for val in irregular_forms[key]:
self.pool[val] = key
self.vowels = frozenset(['a', 'e', 'i', 'o', 'u'])
def _is_consonant(self, word, i):
"""Returns True if word[i] is a consonant, False otherwise
A consonant is defined in the paper as follows:
A consonant in a word is a letter other than A, E, I, O or
U, and other than Y preceded by a consonant. (The fact that
the term `consonant' is defined to some extent in terms of
itself does not make it ambiguous.) So in TOY the consonants
are T and Y, and in SYZYGY they are S, Z and G. If a letter
is not a consonant it is a vowel.
"""
if word[i] in self.vowels:
return False
if word[i] == 'y':
if i == 0:
return True
else:
return (not self._is_consonant(word, i - 1))
return True
def _measure(self, stem):
"""Returns the 'measure' of stem, per definition in the paper
From the paper:
A consonant will be denoted by c, a vowel by v. A list
ccc... of length greater than 0 will be denoted by C, and a
list vvv... of length greater than 0 will be denoted by V.
Any word, or part of a word, therefore has one of the four
forms:
CVCV ... C
CVCV ... V
VCVC ... C
VCVC ... V
These may all be represented by the single form
[C]VCVC ... [V]
where the square brackets denote arbitrary presence of their
contents. Using (VC){m} to denote VC repeated m times, this
may again be written as
[C](VC){m}[V].
m will be called the \measure\ of any word or word part when
represented in this form. The case m = 0 covers the null
word. Here are some examples:
m=0 TR, EE, TREE, Y, BY.
m=1 TROUBLE, OATS, TREES, IVY.
m=2 TROUBLES, PRIVATE, OATEN, ORRERY.
"""
cv_sequence = ''
# Construct a string of 'c's and 'v's representing whether each
# character in `stem` is a consonant or a vowel.
# e.g. 'falafel' becomes 'cvcvcvc',
# 'architecture' becomes 'vcccvcvccvcv'
for i in range(len(stem)):
if self._is_consonant(stem, i):
cv_sequence += 'c'
else:
cv_sequence += 'v'
        # Count the number of 'vc' occurrences, which is equivalent to
# the number of 'VC' occurrences in Porter's reduced form in the
# docstring above, which is in turn equivalent to `m`
return cv_sequence.count('vc')
def _has_positive_measure(self, stem):
return self._measure(stem) > 0
def _contains_vowel(self, stem):
"""Returns True if stem contains a vowel, else False"""
for i in range(len(stem)):
if not self._is_consonant(stem, i):
return True
return False
def _ends_double_consonant(self, word):
"""Implements condition *d from the paper
Returns True if word ends with a double consonant
"""
return (
len(word) >= 2 and
word[-1] == word[-2] and
self._is_consonant(word, len(word) - 1)
)
def _ends_cvc(self, word):
"""Implements condition *o from the paper
From the paper:
*o - the stem ends cvc, where the second c is not W, X or Y
(e.g. -WIL, -HOP).
"""
return (
len(word) >= 3 and
self._is_consonant(word, len(word) - 3) and
not self._is_consonant(word, len(word) - 2) and
self._is_consonant(word, len(word) - 1) and
word[-1] not in ('w', 'x', 'y')
) or (
self.mode == self.NLTK_EXTENSIONS and
len(word) == 2 and
not self._is_consonant(word, 0) and
self._is_consonant(word, 1)
)
def _replace_suffix(self, word, suffix, replacement):
"""Replaces `suffix` of `word` with `replacement"""
assert word.endswith(suffix), "Given word doesn't end with given suffix"
if suffix == '':
return word + replacement
else:
return word[:-len(suffix)] + replacement
def _apply_rule_list(self, word, rules):
"""Applies the first applicable suffix-removal rule to the word
Takes a word and a list of suffix-removal rules represented as
3-tuples, with the first element being the suffix to remove,
the second element being the string to replace it with, and the
final element being the condition for the rule to be applicable,
or None if the rule is unconditional.
"""
for rule in rules:
suffix, replacement, condition = rule
if suffix == '*d' and self._ends_double_consonant(word):
stem = word[:-2]
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
if word.endswith(suffix):
stem = self._replace_suffix(word, suffix, '')
if condition is None or condition(stem):
return stem + replacement
else:
# Don't try any further rules
return word
return word
def _step1a(self, word):
"""Implements Step 1a from "An algorithm for suffix stripping"
From the paper:
SSES -> SS caresses -> caress
IES -> I ponies -> poni
ties -> ti
SS -> SS caress -> caress
S -> cats -> cat
"""
# this NLTK-only rule extends the original algorithm, so
# that 'flies'->'fli' but 'dies'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith('ies') and len(word) == 4:
return self._replace_suffix(word, 'ies', 'ie')
return self._apply_rule_list(word, [
('sses', 'ss', None), # SSES -> SS
('ies', 'i', None), # IES -> I
('ss', 'ss', None), # SS -> SS
('s', '', None), # S ->
])
def _step1b(self, word):
"""Implements Step 1b from "An algorithm for suffix stripping"
From the paper:
(m>0) EED -> EE feed -> feed
agreed -> agree
(*v*) ED -> plastered -> plaster
bled -> bled
(*v*) ING -> motoring -> motor
sing -> sing
If the second or third of the rules in Step 1b is successful,
the following is done:
AT -> ATE conflat(ed) -> conflate
BL -> BLE troubl(ed) -> trouble
IZ -> IZE siz(ed) -> size
(*d and not (*L or *S or *Z))
-> single letter
hopp(ing) -> hop
tann(ed) -> tan
fall(ing) -> fall
hiss(ing) -> hiss
fizz(ed) -> fizz
(m=1 and *o) -> E fail(ing) -> fail
fil(ing) -> file
The rule to map to a single letter causes the removal of one of
the double letter pair. The -E is put back on -AT, -BL and -IZ,
so that the suffixes -ATE, -BLE and -IZE can be recognised
later. This E may be removed in step 4.
"""
# this NLTK-only block extends the original algorithm, so that
# 'spied'->'spi' but 'died'->'die' etc
if self.mode == self.NLTK_EXTENSIONS:
if word.endswith('ied'):
if len(word) == 4:
return self._replace_suffix(word, 'ied', 'ie')
else:
return self._replace_suffix(word, 'ied', 'i')
# (m>0) EED -> EE
if word.endswith('eed'):
stem = self._replace_suffix(word, 'eed', '')
if self._measure(stem) > 0:
return stem + 'ee'
else:
return word
rule_2_or_3_succeeded = False
for suffix in ['ed', 'ing']:
if word.endswith(suffix):
intermediate_stem = self._replace_suffix(word, suffix, '')
if self._contains_vowel(intermediate_stem):
rule_2_or_3_succeeded = True
break
if not rule_2_or_3_succeeded:
return word
return self._apply_rule_list(intermediate_stem, [
('at', 'ate', None), # AT -> ATE
('bl', 'ble', None), # BL -> BLE
('iz', 'ize', None), # IZ -> IZE
# (*d and not (*L or *S or *Z))
# -> single letter
(
'*d',
intermediate_stem[-1],
lambda stem: intermediate_stem[-1] not in ('l', 's', 'z')
),
# (m=1 and *o) -> E
(
'',
'e',
lambda stem: (self._measure(stem) == 1 and
self._ends_cvc(stem))
),
])
def _step1c(self, word):
"""Implements Step 1c from "An algorithm for suffix stripping"
From the paper:
Step 1c
(*v*) Y -> I happy -> happi
sky -> sky
"""
def nltk_condition(stem):
"""
This has been modified from the original Porter algorithm so
that y->i is only done when y is preceded by a consonant,
but not if the stem is only a single consonant, i.e.
(*c and not c) Y -> I
So 'happy' -> 'happi', but
'enjoy' -> 'enjoy' etc
This is a much better rule. Formerly 'enjoy'->'enjoi' and
'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
with this modification that no longer really matters.
Also, the removal of the contains_vowel(z) condition means
that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
conflate with 'spied', 'tried', 'flies' ...
"""
return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)
def original_condition(stem):
return self._contains_vowel(stem)
return self._apply_rule_list(word, [
(
'y',
'i',
nltk_condition if self.mode == self.NLTK_EXTENSIONS
else original_condition
)
])
def _step2(self, word):
"""Implements Step 2 from "An algorithm for suffix stripping"
From the paper:
Step 2
(m>0) ATIONAL -> ATE relational -> relate
(m>0) TIONAL -> TION conditional -> condition
rational -> rational
(m>0) ENCI -> ENCE valenci -> valence
(m>0) ANCI -> ANCE hesitanci -> hesitance
(m>0) IZER -> IZE digitizer -> digitize
(m>0) ABLI -> ABLE conformabli -> conformable
(m>0) ALLI -> AL radicalli -> radical
(m>0) ENTLI -> ENT differentli -> different
        (m>0) ELI -> E vileli -> vile
(m>0) OUSLI -> OUS analogousli -> analogous
(m>0) IZATION -> IZE vietnamization -> vietnamize
(m>0) ATION -> ATE predication -> predicate
(m>0) ATOR -> ATE operator -> operate
(m>0) ALISM -> AL feudalism -> feudal
(m>0) IVENESS -> IVE decisiveness -> decisive
(m>0) FULNESS -> FUL hopefulness -> hopeful
(m>0) OUSNESS -> OUS callousness -> callous
(m>0) ALITI -> AL formaliti -> formal
(m>0) IVITI -> IVE sensitiviti -> sensitive
(m>0) BILITI -> BLE sensibiliti -> sensible
"""
if self.mode == self.NLTK_EXTENSIONS:
# Instead of applying the ALLI -> AL rule after '(a)bli' per
# the published algorithm, instead we apply it first, and,
# if it succeeds, run the result through step2 again.
if (
word.endswith('alli') and
self._has_positive_measure(
self._replace_suffix(word, 'alli', '')
)
):
return self._step2(
self._replace_suffix(word, 'alli', 'al')
)
bli_rule = ('bli', 'ble', self._has_positive_measure)
abli_rule = ('abli', 'able', self._has_positive_measure)
rules = [
('ational', 'ate', self._has_positive_measure),
('tional', 'tion', self._has_positive_measure),
('enci', 'ence', self._has_positive_measure),
('anci', 'ance', self._has_positive_measure),
('izer', 'ize', self._has_positive_measure),
abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
('alli', 'al', self._has_positive_measure),
('entli', 'ent', self._has_positive_measure),
('eli', 'e', self._has_positive_measure),
('ousli', 'ous', self._has_positive_measure),
('ization', 'ize', self._has_positive_measure),
('ation', 'ate', self._has_positive_measure),
('ator', 'ate', self._has_positive_measure),
('alism', 'al', self._has_positive_measure),
('iveness', 'ive', self._has_positive_measure),
('fulness', 'ful', self._has_positive_measure),
('ousness', 'ous', self._has_positive_measure),
('aliti', 'al', self._has_positive_measure),
('iviti', 'ive', self._has_positive_measure),
('biliti', 'ble', self._has_positive_measure),
]
if self.mode == self.NLTK_EXTENSIONS:
rules.append(
('fulli', 'ful', self._has_positive_measure)
)
# The 'l' of the 'logi' -> 'log' rule is put with the stem,
# so that short stems like 'geo' 'theo' etc work like
# 'archaeo' 'philo' etc.
rules.append((
"logi",
"log",
lambda stem: self._has_positive_measure(word[:-3])
))
if self.mode == self.MARTIN_EXTENSIONS:
rules.append(
("logi", "log", self._has_positive_measure)
)
return self._apply_rule_list(word, rules)
def _step3(self, word):
"""Implements Step 3 from "An algorithm for suffix stripping"
From the paper:
Step 3
(m>0) ICATE -> IC triplicate -> triplic
(m>0) ATIVE -> formative -> form
(m>0) ALIZE -> AL formalize -> formal
(m>0) ICITI -> IC electriciti -> electric
(m>0) ICAL -> IC electrical -> electric
(m>0) FUL -> hopeful -> hope
(m>0) NESS -> goodness -> good
"""
return self._apply_rule_list(word, [
('icate', 'ic', self._has_positive_measure),
('ative', '', self._has_positive_measure),
('alize', 'al', self._has_positive_measure),
('iciti', 'ic', self._has_positive_measure),
('ical', 'ic', self._has_positive_measure),
('ful', '', self._has_positive_measure),
('ness', '', self._has_positive_measure),
])
def _step4(self, word):
"""Implements Step 4 from "An algorithm for suffix stripping"
Step 4
(m>1) AL -> revival -> reviv
(m>1) ANCE -> allowance -> allow
(m>1) ENCE -> inference -> infer
(m>1) ER -> airliner -> airlin
(m>1) IC -> gyroscopic -> gyroscop
(m>1) ABLE -> adjustable -> adjust
(m>1) IBLE -> defensible -> defens
(m>1) ANT -> irritant -> irrit
(m>1) EMENT -> replacement -> replac
(m>1) MENT -> adjustment -> adjust
(m>1) ENT -> dependent -> depend
(m>1 and (*S or *T)) ION -> adoption -> adopt
(m>1) OU -> homologou -> homolog
(m>1) ISM -> communism -> commun
(m>1) ATE -> activate -> activ
(m>1) ITI -> angulariti -> angular
(m>1) OUS -> homologous -> homolog
(m>1) IVE -> effective -> effect
(m>1) IZE -> bowdlerize -> bowdler
The suffixes are now removed. All that remains is a little
tidying up.
"""
measure_gt_1 = lambda stem: self._measure(stem) > 1
return self._apply_rule_list(word, [
('al', '', measure_gt_1),
('ance', '', measure_gt_1),
('ence', '', measure_gt_1),
('er', '', measure_gt_1),
('ic', '', measure_gt_1),
('able', '', measure_gt_1),
('ible', '', measure_gt_1),
('ant', '', measure_gt_1),
('ement', '', measure_gt_1),
('ment', '', measure_gt_1),
('ent', '', measure_gt_1),
# (m>1 and (*S or *T)) ION ->
(
'ion',
'',
lambda stem: self._measure(stem) > 1 and stem[-1] in ('s', 't')
),
('ou', '', measure_gt_1),
('ism', '', measure_gt_1),
('ate', '', measure_gt_1),
('iti', '', measure_gt_1),
('ous', '', measure_gt_1),
('ive', '', measure_gt_1),
('ize', '', measure_gt_1),
])
def _step5a(self, word):
"""Implements Step 5a from "An algorithm for suffix stripping"
From the paper:
Step 5a
(m>1) E -> probate -> probat
rate -> rate
(m=1 and not *o) E -> cease -> ceas
"""
# Note that Martin's test vocabulary and reference
# implementations are inconsistent in how they handle the case
# where two rules both refer to a suffix that matches the word
# to be stemmed, but only the condition of the second one is
# true.
        # Earlier in step 1b we had the rules:
# (m>0) EED -> EE
# (*v*) ED ->
# but the examples in the paper included "feed"->"feed", even
# though (*v*) is true for "fe" and therefore the second rule
# alone would map "feed"->"fe".
# However, in THIS case, we need to handle the consecutive rules
# differently and try both conditions (obviously; the second
# rule here would be redundant otherwise). Martin's paper makes
# no explicit mention of the inconsistency; you have to infer it
# from the examples.
# For this reason, we can't use _apply_rule_list here.
if word.endswith('e'):
stem = self._replace_suffix(word, 'e', '')
if self._measure(stem) > 1:
return stem
if self._measure(stem) == 1 and not self._ends_cvc(stem):
return stem
return word
def _step5b(self, word):
"""Implements Step 5a from "An algorithm for suffix stripping"
From the paper:
Step 5b
(m > 1 and *d and *L) -> single letter
controll -> control
roll -> roll
"""
return self._apply_rule_list(word, [
('ll', 'l', lambda stem: self._measure(word[:-1]) > 1)
])
def stem(self, word):
stem = word.lower()
if self.mode == self.NLTK_EXTENSIONS and word in self.pool:
return self.pool[word]
if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2:
# With this line, strings of length 1 or 2 don't go through
# the stemming process, although no mention is made of this
# in the published algorithm.
return word
stem = self._step1a(stem)
stem = self._step1b(stem)
stem = self._step1c(stem)
stem = self._step2(stem)
stem = self._step3(stem)
stem = self._step4(stem)
stem = self._step5a(stem)
stem = self._step5b(stem)
return stem
def __repr__(self):
return '<PorterStemmer>'
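# Usage sketch (illustrative, consistent with text/test_terms.py): in the default
# NLTK_EXTENSIONS mode, PorterStemmer().stem("raining") returns "rain", while
# stem("dying") returns "die" via the irregular-forms pool, and words of length
# 1 or 2 are returned unchanged.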
| aristo-leaderboard-master | propara/evaluator/text/stemmer.py |
from errors.errors import corrupted_action_file, corrupted_sentences_file
| aristo-leaderboard-master | propara/evaluator/errors/__init__.py |
import sys
def corrupted_action_file(filename: str, details: str, line_num: int = None):
if line_num is None:
print(f"Corrupted or empty action file {filename} ({details})")
else:
print(f"Corrupted action file {filename} on line {line_num} ({details})")
sys.exit(2)
def corrupted_sentences_file(filename: str, details: str):
print(f"Corrupted or empty sentences file {filename} ({details})")
sys.exit(2)
| aristo-leaderboard-master | propara/evaluator/errors/errors.py |
# Locations
NO_LOCATION = 'null'  # This location is used for a participant that doesn't exist (was destroyed, or not yet created)
LOCATION_UNKNOWN = 'unk'
# Actions
NO_ACTION = 'NONE'
MOVE = 'MOVE'
CREATE = 'CREATE'
DESTROY = 'DESTROY'
| aristo-leaderboard-master | propara/evaluator/process/constants.py |
from process.process import Process, Conversion, Move, Input, Output
from process.summary import ProcessSummary
from process.action_file import ActionFile
from process.sentence_file import sentences_from_sentences_file
| aristo-leaderboard-master | propara/evaluator/process/__init__.py |
from collections import defaultdict
from typing import Dict, List, Tuple
def sentences_from_sentences_file(sentences_filename: str) -> Dict[int, List[str]]:
all_sentences = dict() # type: Dict[Tuple[int, int], str]
with open(sentences_filename) as f:
for line in f:
process_id_str, sentence_number_str, text = line.strip().split('\t', 3)[:3]
process_id = int(process_id_str)
sentence_number = int(sentence_number_str)
all_sentences[(process_id, sentence_number)] = text
sentences_by_process = defaultdict(list) # type: Dict[int, List[str]]
for key, sentence in sorted(all_sentences.items()):
process_id, sentence_number = key
sentences_by_process[process_id].append(sentence)
return sentences_by_process
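# Note on the file format (inferred from the parsing above): each line carries at
# least three tab-separated fields: process_id, sentence_number, sentence text.
# A hypothetical line would be:
#   514 <tab> 1 <tab> Snow falls in an area for many years.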
| aristo-leaderboard-master | propara/evaluator/process/sentence_file.py |
from typing import Dict, List, NamedTuple
from process.process import Conversion, Move, Input, Output
class ProcessSummary(NamedTuple):
process_id: int
inputs: List[Input]
outputs: List[Output]
conversions: List[Conversion]
moves: List[Move]
def __repr__(self):
return f"Process {self.process_id}" \
f" inputs({self.inputs})" \
f" outputs({self.outputs})" \
f" conversions({self.conversions})" \
f" moves({self.moves})"
def diagnostics(self) -> Dict:
return {
"process_id": self.process_id,
"inputs": self._inputs_diagnostics(),
"outputs": self._outputs_diagnostics(),
"conversions": self._conversions_diagnostics(),
"moves": self._moves_diagnostics(),
}
def _inputs_diagnostics(self):
inputs = []
for i in self.inputs:
inputs.append(i.participants)
if len(inputs) > 0:
return {"participants": inputs}
return {"participants": None}
def _outputs_diagnostics(self):
outputs = []
for i in self.outputs:
outputs.append(i.participants)
if len(outputs) > 0:
return {"participants": outputs}
return {"participants": None}
def _conversions_diagnostics(self):
conversions = []
for c in self.conversions:
conversions.append({
"participants_destroyed": c.destroyed,
"participants_created": c.created,
"location": c.locations,
"step_number": int(c.step_id),
})
if len(conversions) > 0:
return conversions
return None
def _moves_diagnostics(self):
moves = []
for m in self.moves:
moves.append({
"participants": m.participants,
"location_before": m.location_before,
"location_after": m.location_after,
"step_number": int(m.step_id),
})
if len(moves) > 0:
return moves
return None
| aristo-leaderboard-master | propara/evaluator/process/summary.py |
import unittest
from collections import OrderedDict
from process import process, Process, Conversion, Move, Input, Output
from process.constants import NO_ACTION as NO_ACT, NO_LOCATION as NO_LOC, CREATE, DESTROY, MOVE
class TestProcess(unittest.TestCase):
def test_qa(self):
p = Process(
process_id=514,
locations=OrderedDict([
('glacier', [NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, 'area', 'area']),
('snow', ['area', 'area', 'area', 'area', NO_LOC, NO_LOC, NO_LOC, NO_LOC]),
('mass', [NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, 'area', 'area', 'area'])
]),
actions=OrderedDict([
('glacier', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, NO_ACT, CREATE, NO_ACT]),
('snow', [NO_ACT, NO_ACT, NO_ACT, DESTROY, NO_ACT, NO_ACT, NO_ACT]),
('mass', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, CREATE, NO_ACT, NO_ACT])
]),
num_steps=7,
)
self.assertEquals(p.inputs(), [
Input(participants='snow')
])
self.assertEquals(p.outputs(), [
Output(participants='glacier'),
Output(participants='mass')
])
self.assertEquals(p.conversions(), [
Conversion(destroyed='snow', created='mass', locations='area', step_id='4')
])
self.assertEquals(p.moves(), [])
p = Process(
process_id=540,
locations=OrderedDict([
('air', ['unk', 'unk', 'unk', 'bronchiole', 'alveolus', 'unk', 'unk', 'unk', 'unk', 'unk', 'unk']),
('oxygen', ['unk', 'unk', 'unk', 'unk', 'unk', 'bloodstream', 'unk', 'unk', 'unk', 'unk', 'unk']),
('carbon dioxide',
['unk', 'unk', 'unk', 'unk', 'unk', 'bloodstream', 'bloodstream', 'alveolus', 'bronchiole', 'lung',
'body'])
]),
actions=OrderedDict([
('air', [NO_ACT, NO_ACT, MOVE, MOVE, MOVE, NO_ACT, NO_ACT, NO_ACT, NO_ACT, NO_ACT]),
('oxygen', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, MOVE, MOVE, NO_ACT, NO_ACT, NO_ACT, NO_ACT]),
('carbon dioxide', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, MOVE, NO_ACT, MOVE, MOVE, MOVE, MOVE])
]),
num_steps=10,
)
self.assertEquals(p.inputs(), [])
self.assertEquals(p.outputs(), [])
self.assertEquals(p.conversions(), [])
self.assertEquals(p.moves(), [
Move(participants='air', location_before='unk', location_after='bronchiole', step_id='3'),
Move(participants='air', location_before='bronchiole', location_after='alveolus', step_id='4'),
Move(participants='air', location_before='alveolus', location_after='unk', step_id='5'),
Move(participants='oxygen', location_before='unk', location_after='bloodstream', step_id='5'),
Move(participants='oxygen', location_before='bloodstream', location_after='unk', step_id='6'),
Move(participants='carbon dioxide', location_before='unk', location_after='bloodstream', step_id='5'),
Move(participants='carbon dioxide', location_before='bloodstream', location_after='alveolus', step_id='7'),
Move(participants='carbon dioxide', location_before='alveolus', location_after='bronchiole', step_id='8'),
Move(participants='carbon dioxide', location_before='bronchiole', location_after='lung', step_id='9'),
Move(participants='carbon dioxide', location_before='lung', location_after='body', step_id='10'),
])
def test_is_this_action_seq_of_an_input(self):
self.assertFalse(process._is_this_action_seq_of_an_input([NO_ACT, CREATE, DESTROY, NO_ACT]))
self.assertFalse(process._is_this_action_seq_of_an_input([CREATE, DESTROY, NO_ACT, NO_ACT]))
def test_summarize_participants(self):
self.assertEquals('gasoline OR gas', process._summarize_participants('gasoline; gas'))
self.assertEquals('gasoline OR gas', process._summarize_participants('gasoline;gas'))
def test_split_participants(self):
self.assertEquals(['gasoline', 'gas'], process._split_participants('gasoline; gas'))
self.assertEquals(['gasoline', 'gas'], process._split_participants('gasoline;gas'))
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | propara/evaluator/process/test_process.py |
from collections import OrderedDict, defaultdict
from typing import NamedTuple, Dict, List
from errors import corrupted_action_file
from process.constants import LOCATION_UNKNOWN, NO_LOCATION, NO_ACTION, CREATE, MOVE, DESTROY
from process import ProcessSummary, Process
def _accumulate_action(locations, actions, num_steps, participant, action, before_location, after_location, step_id):
existing_locations = locations.setdefault(participant, [LOCATION_UNKNOWN] * (1 + num_steps))
existing_actions = actions.setdefault(participant, [NO_ACTION] * num_steps)
if step_id == 1:
existing_locations[0] = before_location
existing_locations[step_id] = after_location
existing_actions[step_id - 1] = action
return locations, actions
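# Illustration of _accumulate_action (not part of the original module): for a
# 3-step process, a participant first seen at (step_id=2, action=MOVE,
# before='soil', after='mud') gets locations ['unk', 'unk', 'mud', 'unk']
# (length 1 + num_steps, index step_id set to the after-location; index 0 is
# only set from the before-location when step_id == 1) and actions
# ['NONE', 'MOVE', 'NONE'] (index step_id - 1 set to the action).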
def _num_sentences_in_actions_file(actions_filename: str) -> Dict[int, int]:
num_sentences = defaultdict(int) # type: Dict[int, int]
with open(actions_filename) as f:
line_num = 0
for line in f:
line_num += 1
try:
process_id_str, step_id_str = line.strip().split('\t', 2)[:2]
except ValueError as e:
corrupted_action_file(
filename=actions_filename,
line_num=line_num,
details=str(e)
)
process_id = int(process_id_str)
step_id = int(step_id_str)
num_sentences[process_id] = max(num_sentences[process_id], step_id)
if not num_sentences:
corrupted_action_file(actions_filename, "no lines to iterate")
return num_sentences
class ActionFile(NamedTuple):
filename: str
# key = process_id
# value = OrderedDict like this:
# key = participant string (like "water vapor ; lifted vapor ; vapor")
# value = list of location strings, length = 1 + number of sentences
locations: Dict[int, Dict[str, List[str]]]
# key = process_id
# value = OrderedDict like this:
# key = participant string (like "water vapor ; lifted vapor ; vapor")
# value = list of actions (CREATE, DESTROY, MOVE or NONE), length = number of sentences
actions: Dict[int, Dict[str, List[str]]]
# key = process_id
# value = number of sentences per process
num_sentences: Dict[int, int]
def has_process_id(self, process_id: int):
return process_id in self.locations
def summarize(self) -> Dict[int, ProcessSummary]:
summary_by_process_id = dict() # type: Dict[int, ProcessSummary]
for process_id in self.locations.keys():
locations = self.locations[process_id]
actions = self.actions[process_id]
p = Process(process_id=process_id, locations=locations, actions=actions,
num_steps=self.num_sentences[process_id])
summary_by_process_id[p.process_id] = ProcessSummary(
process_id=p.process_id,
inputs=p.inputs(),
outputs=p.outputs(),
conversions=p.conversions(),
moves=p.moves(),
)
return summary_by_process_id
def diff_participants(self, other: "ActionFile") -> List[str]:
report: List[str] = []
for process_id in self.process_ids():
self_participants = self.participants(process_id)
if not other.has_process_id(process_id):
report.append(f"Process {process_id} missing in {other.filename}")
continue
other_participants = other.participants(process_id)
process_report: List[str] = []
for p in self_participants:
if p not in other_participants:
process_report.append(f"Process {process_id} in {other.filename}: participant \"{p}\" is missing.")
for op in other_participants:
if op not in self_participants:
process_report.append(
f"Process {process_id} in {other.filename}: participant \"{op}\" is unexpected.")
report += sorted(process_report)
return report
def process_ids(self) -> List[int]:
return sorted(self.locations.keys())
def participants(self, process_id) -> List[str]:
return sorted(self.locations[process_id].keys())
    # Reads an action file from disk.
@classmethod
def from_file(cls, action_filename: str) -> "ActionFile":
num_sentences = _num_sentences_in_actions_file(action_filename)
locations = defaultdict(OrderedDict) # type: Dict[int, Dict[str, List[str]]]
actions = defaultdict(OrderedDict) # type: Dict[int, Dict[str, List[str]]]
line_num = 0
with open(action_filename) as f:
for line in f:
line_num += 1
try:
process_id_str, step_id_str, participant, action, before_location, after_location = \
line.strip("\n\r").split('\t', 6)[:6]
except ValueError as e:
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=str(e)
)
process_id = int(process_id_str)
step_id = int(step_id_str)
if action == NO_ACTION:
if before_location != after_location:
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Unequal NONE locations: {before_location} -- {after_location}"
)
elif action == CREATE:
if before_location != '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid CREATE before_location: {before_location}"
)
before_location = NO_LOCATION
if after_location == "" or after_location == '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid CREATE after_location: {after_location}"
)
elif action == DESTROY:
if before_location == "" or before_location == '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid DESTROY before_location: {before_location}"
)
if after_location != '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid DESTROY after_location: {after_location}"
)
elif action == MOVE:
if before_location == "" or before_location == '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid MOVE before_location: {before_location}"
)
if after_location == "" or after_location == '-':
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid MOVE after_location: {after_location}"
)
else:
corrupted_action_file(
filename=action_filename,
line_num=line_num,
details=f"Invalid action: {action}"
)
if before_location == "-":
before_location = NO_LOCATION
elif before_location == "?":
before_location = LOCATION_UNKNOWN
if after_location == "-":
after_location = NO_LOCATION
elif after_location == "?":
after_location = LOCATION_UNKNOWN
# update locations and actions for this process_id
locations[process_id], actions[process_id] = \
_accumulate_action(
locations[process_id],
actions[process_id],
num_sentences[process_id],
participant,
action,
before_location,
after_location,
step_id,
)
if not locations:
            corrupted_action_file(filename=action_filename, details="no lines to iterate")
return cls(
filename=action_filename,
locations=locations,
actions=actions,
num_sentences=num_sentences
)
| aristo-leaderboard-master | propara/evaluator/process/action_file.py |
from typing import List, NamedTuple, Dict
from process.constants import NO_LOCATION, CREATE, DESTROY, MOVE
class Input(NamedTuple):
participants: str
class Output(NamedTuple):
participants: str
class Conversion(NamedTuple):
created: str
destroyed: str
locations: str
step_id: str
class Move(NamedTuple):
participants: str
location_before: str
location_after: str
step_id: str
class Process(NamedTuple):
process_id: int
locations: Dict
actions: Dict
num_steps: int
    # Q1: What are the inputs?
    # - If a participant exists in the initial state (state 1) but not in the final state (state N), it's an input.
def inputs(self) -> List[Input]:
inputs = [] # type: List[Input]
for participant in self.locations.keys():
actions = self.actions[participant]
if _is_this_action_seq_of_an_input(actions):
inputs.append(Input(participants=_summarize_participants(participant)))
return inputs
    # Q2: What are the outputs?
    # - If a participant does not exist in the initial state (state 1) but exists in the final state (state N), it's an output.
def outputs(self) -> List[Output]:
outputs = [] # type: List[Output]
for participant in self.locations.keys():
actions = self.actions[participant]
if _is_this_action_seq_of_an_output(actions):
outputs.append(Output(participants=_summarize_participants(participant)))
return outputs
# Q3: What is converted?
# tuple: (participant-list-from, participant-list-to, loc-list, step-id)
# a. For any event with BOTH "D" and "C" in:
# The "D" participants are converted to the "C" participants at the union of the D and C locations
# b. IF an event has ONLY "D" but no "C" in ("M" is ok - irrelevant)
# AND the NEXT event has ONLY "C" but no "D" in ("M" is ok - irrelevant)
# THEN the "D" participants are converted to the "C" participants at the union of the D and C locations
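    # Illustrative example (hypothetical values): if step 3 destroys "snow" at
    # "area" and also creates "water" at "area", conversions() yields
    # Conversion(created="water", destroyed="snow", locations="area", step_id="3").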
def conversions(self) -> List[Conversion]:
conversions = [] # type: List[Conversion]
for step_id in range(1, self.num_steps + 1):
(created, c_locations) = self._get_created_at_step(step_id)
(destroyed, d_locations) = self._get_destroyed_at_step(step_id)
if created and destroyed:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed),
created=_conjunction(*created),
locations=_conjunction(*set(c_locations + d_locations)),
step_id=str(step_id)
))
elif destroyed and step_id < self.num_steps - 1:
(created2, c_locations2) = self._get_created_at_step(step_id + 1)
(destroyed2, d_locations2) = self._get_destroyed_at_step(step_id + 1)
created_but_not_destroyed = set(created2) - set(destroyed)
if not destroyed2 and created_but_not_destroyed:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed),
created=_conjunction(*created_but_not_destroyed),
locations=_conjunction(*set(c_locations2 + d_locations)),
step_id=str(step_id)
))
elif created and step_id < self.num_steps - 1:
(created2, c_locations2) = self._get_created_at_step(step_id + 1)
(destroyed2, d_locations2) = self._get_destroyed_at_step(step_id + 1)
destroyed_but_not_created = set(destroyed2) - set(created)
if not created2 and destroyed_but_not_created:
conversions.append(Conversion(
destroyed=_conjunction(*destroyed_but_not_created),
created=_conjunction(*created),
locations=_conjunction(*set(c_locations + d_locations2)),
step_id=str(step_id)
))
return conversions
# Q4: What is moved?
# tuple: (participant, from-loc, to-loc, step-id)
# return all moves
    def moves(self) -> List[Move]:
moves = []
for participant in self.locations.keys():
locations = self.locations[participant]
actions = self.actions[participant]
for step_id in range(1, len(locations)):
is_moved = actions[step_id - 1] == MOVE or (
locations[step_id - 1] != NO_LOCATION and
locations[step_id] != NO_LOCATION and
locations[step_id - 1] != locations[step_id]
)
if not is_moved:
continue
moves.append(Move(
participants=_summarize_participants(participant),
location_before=locations[step_id - 1],
location_after=locations[step_id],
step_id=str(step_id)
))
return moves
def _get_created_at_step(self, step_id: int):
created = []
locations = []
for participant in self.locations.keys():
state_values = self.locations[participant]
is_creation = state_values[step_id - 1] == NO_LOCATION \
and state_values[step_id] != NO_LOCATION
if is_creation:
created.append(_summarize_participants(participant))
locations.append(state_values[step_id])
return created, locations
def _get_destroyed_at_step(self, step_id: int):
destroyed = []
locations = []
for participant in self.locations.keys():
state_values = self.locations[participant]
is_destruction = state_values[step_id - 1] != NO_LOCATION \
and state_values[step_id] == NO_LOCATION
if is_destruction:
destroyed.append(_summarize_participants(participant))
locations.append(state_values[step_id - 1])
return destroyed, locations
def _is_this_action_seq_of_an_output(actions) -> bool:
for action_id, _ in enumerate(actions):
no_destroy_move_before = DESTROY not in actions[0:action_id] and MOVE not in actions[0:action_id]
current_create = actions[action_id] == CREATE
no_destroy_later = DESTROY not in actions[action_id + 1:]
if no_destroy_move_before and current_create and no_destroy_later:
return True
return False
def _is_this_action_seq_of_an_input(actions) -> bool:
for action_id, _ in enumerate(actions):
        no_create_before = CREATE not in actions[0:action_id]  # actions strictly before this step
current_destroy = actions[action_id] == DESTROY
no_create_move_later = CREATE not in actions[action_id + 1:] and MOVE not in actions[action_id + 1:]
if no_create_before and current_destroy and no_create_move_later:
return True
return False
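# Illustrative examples (hypothetical sequences; action strings per the
# CREATE/DESTROY/MOVE/NONE convention used throughout):
#   _is_this_action_seq_of_an_input(["NONE", "DESTROY", "NONE"]) -> True
#     (destroyed, with no CREATE before and no CREATE/MOVE afterwards)
#   _is_this_action_seq_of_an_output(["NONE", "CREATE", "NONE"]) -> True
#     (created, with no DESTROY/MOVE before and no DESTROY afterwards)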
def _split_participants(participant) -> List[str]:
return [p.strip() for p in participant.split(';')]
def _summarize_participants(participant) -> str:
return ' OR '.join(_split_participants(participant))
def _conjunction(*things) -> str:
return ' AND '.join(things)
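# Illustrative examples:
#   _summarize_participants("water vapor ; vapor") -> "water vapor OR vapor"
#   _conjunction("snow", "ice") -> "snow AND ice"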
| aristo-leaderboard-master | propara/evaluator/process/process.py |
import unittest
from collections import OrderedDict
from process.action_file import ActionFile
from process.constants import NO_ACTION as NO_ACT
from process.constants import NO_LOCATION as NO_LOC, CREATE, DESTROY, MOVE
class TestSummarize(unittest.TestCase):
def test_load(self):
# Spot-check values loaded from an action file
actionfile = ActionFile.from_file('testfiles-0/answers.tsv')
# Process 514
        self.assertEqual(
OrderedDict([
('glacier', [NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, 'area', 'area']),
('mass', [NO_LOC, NO_LOC, NO_LOC, NO_LOC, NO_LOC, 'area', 'area', 'area']),
('snow', ['area', 'area', 'area', 'area', NO_LOC, NO_LOC, NO_LOC, NO_LOC]),
]),
actionfile.locations[514],
)
        self.assertEqual(
OrderedDict([
('glacier', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, NO_ACT, CREATE, NO_ACT]),
('mass', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, CREATE, NO_ACT, NO_ACT]),
('snow', [NO_ACT, NO_ACT, NO_ACT, DESTROY, NO_ACT, NO_ACT, NO_ACT]),
]),
actionfile.actions[514],
)
        self.assertEqual(7, actionfile.num_sentences[514])
# Process 540
        self.assertEqual(
OrderedDict([
('air', ['unk', 'unk', 'unk', 'bronchiole', 'alveolus', 'unk', 'unk', 'unk', 'unk', 'unk', 'unk']),
('carbon dioxide',
['unk', 'unk', 'unk', 'unk', 'unk', 'bloodstream', 'bloodstream', 'alveolus', 'bronchiole', 'lung',
'body']),
('oxygen', ['unk', 'unk', 'unk', 'unk', 'unk', 'bloodstream', 'unk', 'unk', 'unk', 'unk', 'unk']),
]),
actionfile.locations[540],
)
        self.assertEqual(
OrderedDict([
('air', [NO_ACT, NO_ACT, MOVE, MOVE, MOVE, NO_ACT, NO_ACT, NO_ACT, NO_ACT, NO_ACT]),
('carbon dioxide', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, MOVE, NO_ACT, MOVE, MOVE, MOVE, MOVE]),
('oxygen', [NO_ACT, NO_ACT, NO_ACT, NO_ACT, MOVE, MOVE, NO_ACT, NO_ACT, NO_ACT, NO_ACT]),
]),
actionfile.actions[540]
)
        self.assertEqual(10, actionfile.num_sentences[540])
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | propara/evaluator/process/test_action_file.py |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["A"]}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_EverythingWrong(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_MixedResults(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"]}
self.assertEqual(2.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_PartialGuess(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A", "B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.5 / 3, evaluator.calculate_accuracy(qa, p))
def test_ExtraPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"], "QExtra": ["X"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_EXTRA)
def test_MissingPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTION_MISSING)
def temp_file_with_contents(lines: typing.List[str]) -> str:
t = tempfile.NamedTemporaryFile(mode='wt', delete=False)
t.writelines(lines)
t.close()
return t.name
class TestReadAnswers(unittest.TestCase):
def test_ReadAnswers(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q2", "answerKey": "B"}\n',
'{"id": "Q3", "answerKey": "C"}\n',
])
answers = evaluator.read_answers(t)
os.remove(t)
self.assertEqual(answers, {"Q1": "A", "Q2": "B", "Q3": "C"})
def test_ReadAnswersEmpty(self):
t = temp_file_with_contents([])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersCorrupted(self):
t = temp_file_with_contents(['this is not json'])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersRepeated(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q1", "answerKey": "B"}\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
class TestReadPredictions(unittest.TestCase):
def test_ReadPredictions(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2",A;B\n',
'Q3,"A;B;C"\n',
])
predictions = evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(predictions, {
"Q1": ["A"],
"Q2": ["A", "B"],
"Q3": ["A", "B", "C"],
})
def test_ReadPredictionsMissingColumn(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2"\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsRepeated(self):
t = temp_file_with_contents([
'Q1,A\n',
'Q1,A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyKey(self):
t = temp_file_with_contents([
',A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyLabels(self):
t = temp_file_with_contents([
'Q1,A;\n',
])
with self.assertRaises(SystemExit) as context:
p = evaluator.read_predictions(t)
print(p)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | openbookqa/evaluator/test_evaluator.py |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
def calculate_accuracy(question_answers: Dict[str, str], predictions: Dict[str, List[str]]) -> float:
score = 0.0
for question_id, answer in question_answers.items():
try:
predictions_for_q = predictions[question_id]
except KeyError:
logging.error("Missing prediction for question '%s'.", question_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1.0 / len(predictions_for_q)
del predictions[question_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(question_answers)
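# Worked example (illustrative): with question_answers {"Q1": "A"} and
# predictions {"Q1": ["A", "B"]}, Q1 contributes 1/2 because the correct label
# is one of two predicted labels, so the returned accuracy is 0.5.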
def read_answers(filename: str) -> Dict[str, str]:
answers = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
question_id = record["id"]
answer = record["answerKey"]
if question_id in answers:
logging.error("Key %s repeated in %s", question_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[question_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, List[str]]:
predictions = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
question_id = row[0]
prediction_raw = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id in predictions:
logging.error("Key %s repeated in file %s on line %d", question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
prediction = prediction_raw.split(";")
# prediction labels cannot be empty strings
for p in prediction:
if p == "":
logging.error("Key %s has empty labels for prediction in file %s on line %d",
question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[question_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for questions.')
parser.add_argument(
'--question-answers', '-qa',
help='Filename of the question answers to read. Expects a JSONL file with documents that have field "id" and "answerKey".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.',
required=True)
args = parser.parse_args()
question_answers = read_answers(args.question_answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(question_answers, predictions)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
if __name__ == '__main__':
main()
| aristo-leaderboard-master | openbookqa/evaluator/evaluator.py |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["A"]}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_EverythingWrong(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_MixedResults(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"]}
self.assertEqual(2.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_PartialGuess(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A", "B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.5 / 3, evaluator.calculate_accuracy(qa, p))
def test_ExtraPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"], "QExtra": ["X"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_EXTRA)
def test_MissingPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTION_MISSING)
def temp_file_with_contents(lines: typing.List[str]) -> str:
t = tempfile.NamedTemporaryFile(mode='wt', delete=False)
t.writelines(lines)
t.close()
return t.name
class TestReadAnswers(unittest.TestCase):
def test_ReadAnswers(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q2", "answerKey": "B"}\n',
'{"id": "Q3", "answerKey": "C"}\n',
])
answers = evaluator.read_answers(t)
os.remove(t)
self.assertEqual(answers, {"Q1": "A", "Q2": "B", "Q3": "C"})
def test_ReadAnswersEmpty(self):
t = temp_file_with_contents([])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersCorrupted(self):
t = temp_file_with_contents(['this is not json'])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersRepeated(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q1", "answerKey": "B"}\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
class TestReadPredictions(unittest.TestCase):
def test_ReadPredictions(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2",A;B\n',
'Q3,"A;B;C"\n',
])
predictions = evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(predictions, {
"Q1": ["A"],
"Q2": ["A", "B"],
"Q3": ["A", "B", "C"],
})
def test_ReadPredictionsMissingColumn(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2"\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsRepeated(self):
t = temp_file_with_contents([
'Q1,A\n',
'Q1,A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyKey(self):
t = temp_file_with_contents([
',A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyLabels(self):
t = temp_file_with_contents([
'Q1,A;\n',
])
with self.assertRaises(SystemExit) as context:
p = evaluator.read_predictions(t)
print(p)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | qasc/evaluator/test_evaluator.py |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
def calculate_accuracy(question_answers: Dict[str, str], predictions: Dict[str, List[str]]) -> float:
score = 0.0
for question_id, answer in question_answers.items():
try:
predictions_for_q = predictions[question_id]
except KeyError:
logging.error("Missing prediction for question '%s'.", question_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1.0 / len(predictions_for_q)
del predictions[question_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(question_answers)
def read_answers(filename: str) -> Dict[str, str]:
answers = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
question_id = record["id"]
answer = record["answerKey"]
if question_id in answers:
logging.error("Key %s repeated in %s", question_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[question_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, List[str]]:
predictions = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
question_id = row[0]
prediction_raw = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id in predictions:
logging.error("Key %s repeated in file %s on line %d", question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
prediction = prediction_raw.split(";")
# prediction labels cannot be empty strings
for p in prediction:
if p == "":
logging.error("Key %s has empty labels for prediction in file %s on line %d",
question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[question_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for questions.')
parser.add_argument(
'--question-answers', '-qa',
help='Filename of the question answers to read. Expects a JSONL file with documents that have field "id" and "answerKey".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.',
required=True)
args = parser.parse_args()
question_answers = read_answers(args.question_answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(question_answers, predictions)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
if __name__ == '__main__':
main()
| aristo-leaderboard-master | qasc/evaluator/evaluator.py |
import os
import evaluator
import unittest
import tempfile
import typing
class TestAccuracy(unittest.TestCase):
def test_EverythingCorrect(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["A"]}
self.assertEqual(3.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_EverythingWrong(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_MixedResults(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"]}
self.assertEqual(2.0 / 3.0, evaluator.calculate_accuracy(qa, p))
def test_PartialGuess(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A", "B"], "Q2": ["B"], "Q3": ["B"]}
self.assertEqual(0.5 / 3, evaluator.calculate_accuracy(qa, p))
def test_ExtraPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"], "Q3": ["B"], "QExtra": ["X"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_EXTRA)
def test_MissingPredictions(self):
qa = {"Q1": "A", "Q2": "A", "Q3": "A"}
p = {"Q1": ["A"], "Q2": ["A"]}
with self.assertRaises(SystemExit) as context:
evaluator.calculate_accuracy(qa, p)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTION_MISSING)
def temp_file_with_contents(lines: typing.List[str]) -> str:
t = tempfile.NamedTemporaryFile(mode='wt', delete=False)
t.writelines(lines)
t.close()
return t.name
class TestReadAnswers(unittest.TestCase):
def test_ReadAnswers(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q2", "answerKey": "B"}\n',
'{"id": "Q3", "answerKey": "C"}\n',
])
answers = evaluator.read_answers(t)
os.remove(t)
self.assertEqual(answers, {"Q1": "A", "Q2": "B", "Q3": "C"})
def test_ReadAnswersEmpty(self):
t = temp_file_with_contents([])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersCorrupted(self):
t = temp_file_with_contents(['this is not json'])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
def test_ReadAnswersRepeated(self):
t = temp_file_with_contents([
'{"id": "Q1", "answerKey": "A"}\n',
'{"id": "Q1", "answerKey": "B"}\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_answers(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_ANSWERS_MALFORMED)
class TestReadPredictions(unittest.TestCase):
def test_ReadPredictions(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2",A;B\n',
'Q3,"A;B;C"\n',
])
predictions = evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(predictions, {
"Q1": ["A"],
"Q2": ["A", "B"],
"Q3": ["A", "B", "C"],
})
def test_ReadPredictionsMissingColumn(self):
t = temp_file_with_contents([
'Q1,A\n',
'"Q2"\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsRepeated(self):
t = temp_file_with_contents([
'Q1,A\n',
'Q1,A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyKey(self):
t = temp_file_with_contents([
',A\n',
])
with self.assertRaises(SystemExit) as context:
evaluator.read_predictions(t)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
def test_ReadPredictionsCorruptedEmptyLabels(self):
t = temp_file_with_contents([
'Q1,A;\n',
])
with self.assertRaises(SystemExit) as context:
p = evaluator.read_predictions(t)
print(p)
os.remove(t)
self.assertEqual(context.exception.code, evaluator.EXIT_STATUS_PREDICTIONS_MALFORMED)
if __name__ == '__main__':
unittest.main()
| aristo-leaderboard-master | arc/evaluator/test_evaluator.py |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
def calculate_accuracy(question_answers: Dict[str, str], predictions: Dict[str, List[str]]) -> float:
score = 0.0
for question_id, answer in question_answers.items():
try:
predictions_for_q = predictions[question_id]
except KeyError:
logging.error("Missing prediction for question '%s'.", question_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1.0 / len(predictions_for_q)
del predictions[question_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(question_answers)
def read_answers(filename: str) -> Dict[str, str]:
answers = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
question_id = record["id"]
answer = record["answerKey"]
if question_id in answers:
logging.error("Key %s repeated in %s", question_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[question_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, List[str]]:
predictions = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
question_id = row[0]
prediction_raw = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id in predictions:
logging.error("Key %s repeated in file %s on line %d", question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
prediction = prediction_raw.split(";")
# prediction labels cannot be empty strings
for p in prediction:
if p == "":
logging.error("Key %s has empty labels for prediction in file %s on line %d",
question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[question_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for questions.')
parser.add_argument(
'--question-answers', '-qa',
help='Filename of the question answers to read. Expects a JSONL file with documents that have field "id" and "answerKey".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.',
required=True)
args = parser.parse_args()
question_answers = read_answers(args.question_answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(question_answers, predictions)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
if __name__ == '__main__':
main()
| aristo-leaderboard-master | arc/evaluator/evaluator.py |
import ast
import hashlib
import json
import os
from collections import defaultdict
from typing import Tuple, Sequence, Dict, Optional, Union, Any, Set
import compress_pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
from filelock import FileLock
from allenact.utils.misc_utils import (
bootstrap_max_of_subset_statistic,
expected_max_of_subset_statistic,
all_equal,
)
from minigrid_and_pd_scripts.compute_random_performance_for_task import (
TASK_TO_RANDOM_PERFORMANCE,
)
from projects.advisor.advisor_constants import (
METHOD_ORDER,
METHOD_TO_COLOR,
METHOD_TO_LINE_MARKER,
EXPERIMENT_STR_TO_LABEL_DICT,
)
from projects.advisor.lighthouse_scripts.summarize_pairwise_imitation_data import (
set_size,
)
from projects.advisor.minigrid_constants import ENV_NAMES_TO_TITLE
plt.rc("font", **{"family": "serif", "serif": ["CMU"], "size": 16})
plt.rc("xtick", labelsize=12)
plt.rc("ytick", labelsize=12)
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{amsmath}")
METRIC_TO_LABEL = {
"reward": "Reward",
"rewards": "Reward",
"avg_ep_length": "Avg. Ep. Length",
"success": "Success",
}
def unzip(xs):
a = None
n = None
for x in xs:
if n is None:
n = len(x)
a = [[] for _ in range(n)]
for i, y in enumerate(x):
a[i].append(y)
return a
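# Illustrative example: unzip([(1, "a"), (2, "b")]) -> [[1, 2], ["a", "b"]],
# i.e. the inverse of zip for a non-empty sequence of equal-length tuples.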
def add_columns_to_df(df):
keys = ["alpha_start", "alpha_stop", "fixed_alpha", "lr", "tf_ratio"]
for key in keys + ["pretty_label"]:
df[key] = [None] * df.shape[0]
def read_config_kwargs_str(config_kwargs_str):
if config_kwargs_str == "" or config_kwargs_str is None:
return {}
elif isinstance(config_kwargs_str, Dict):
return config_kwargs_str
else:
try:
return json.loads(config_kwargs_str)
except Exception:
return ast.literal_eval(config_kwargs_str)
df.loc[:, "config_kwargs"] = [
read_config_kwargs_str(config_kwargs_str)
for config_kwargs_str in df.loc[:, "config_kwargs_str"]
]
for i in range(df.shape[0]):
row = df.loc[i, :]
config_kwargs: Dict[str, Any] = row["config_kwargs"]
for key in keys:
df.loc[i, key] = config_kwargs.get(key.upper(), None)
for i in range(df.shape[0]):
df.loc[i, "pretty_label"] = run_info_to_pretty_label(dict(df.loc[i, :]))
return df
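# Note: `read_config_kwargs_str` above accepts either a JSON string (e.g.
# '{"LR": 0.001}') or a Python-literal dict string (e.g. "{'LR': 0.001}");
# the looked-up keys are upper-cased column names ("ALPHA_START", "LR", ...).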
def plot_max_hp_curves(
x_to_y_list: Sequence[Dict[Union[int, float], float]],
x_to_bootstrap_ys_list: Sequence[Dict[Union[int, float], Sequence[float]]],
method_labels: Sequence[str],
colors: Sequence[Tuple[int, int, int]],
line_styles: Optional[Sequence] = None,
line_markers: Optional[Sequence] = None,
title: str = "",
xlabel: str = "",
ylabel: str = "",
fig_size=(4, 4 * 3.0 / 5.0),
save_path: Optional[str] = None,
put_legend_outside: bool = True,
include_legend: bool = False,
performance_of_random_agent: Optional[float] = None,
best_inds_to_highlight: Optional[Set] = None,
):
"""Plots E[max(metric | n hp runs)] curves.
For more information on studying sensitivity of methods to
hyperparameter tuning, refer to Dodge et al. EMNLP 2019
https://arxiv.org/abs/1909.03004
"""
line_styles = ["solid"] * len(colors) if line_styles is None else line_styles
line_markers = [""] * len(colors) if line_markers is None else line_markers
plt.grid(
b=True,
which="major",
color=np.array([0.93, 0.93, 0.93]),
linestyle="-",
zorder=-2,
)
plt.minorticks_on()
plt.grid(
b=True,
which="minor",
color=np.array([0.97, 0.97, 0.97]),
linestyle="-",
zorder=-2,
)
ax = plt.gca()
ax.set_axisbelow(True)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if best_inds_to_highlight is None:
best_inds_to_highlight = set(range(len(x_to_y_list)))
xscaled = False
for (
index,
(x_to_y, x_to_bootstrap_ys, method_label, color, line_style, line_marker,),
) in enumerate(
zip(
x_to_y_list,
x_to_bootstrap_ys_list,
method_labels,
colors,
line_styles,
line_markers,
)
):
xvals = list(sorted(x_to_bootstrap_ys.keys()))
points_list = [x_to_bootstrap_ys[x] for x in xvals]
points = [x_to_y[x] for x in xvals]
should_highlight = index in best_inds_to_highlight
if max(xvals) > 1e3:
xscaled = True
xvals = [x / 1e6 for x in xvals]
try:
lower, _, upper = unzip(
[np.percentile(points, [25, 50, 75]) for points in points_list]
)
except Exception as _:
print(
"Could not generate max_hp_curve for {}, too few points".format(
method_label
)
)
continue
if performance_of_random_agent is not None:
xvals = [0] + xvals
points = [performance_of_random_agent] + points
lower = [performance_of_random_agent] + lower
upper = [performance_of_random_agent] + upper
plt.gca().fill_between(
xvals,
lower,
upper,
color=np.array(color + (25 if should_highlight else 0,)) / 255,
zorder=1,
)
plot_kwargs = dict(
lw=2.5,
linestyle=line_style,
marker=line_marker,
markersize=8,
markevery=4 if len(xvals) > 10 else 1,
zorder=2,
)
label = (
r"{}.{}".format(index + 1, "\ \ " if index + 1 < 10 else " ") + method_label
)
color = np.array(color + (255,)) / 255
plt.plot([], [], label=label, color=color, **plot_kwargs) # FOR LEGEND ONLY
if not should_highlight:
color = np.array(color)
color[3] = 0.1
plt.plot(xvals, points, color=color, **plot_kwargs)
plt.title(title)
plt.xlabel(xlabel + (r"(Millions)" if xscaled and len(xlabel) != 0 else r""))
plt.ylabel(ylabel)
plt.ticklabel_format(style="plain")
plt.tight_layout()
if include_legend:
if put_legend_outside:
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
else:
plt.legend()
set_size(*fig_size)
if save_path is None:
plt.show()
else:
plt.savefig(
save_path, bbox_inches="tight",
)
plt.close()
print(f"Figure saved to {save_path}")
def create_comparison_hp_plots_from_tsv(
num_hp_evals_for_steps_plot: int,
tsv_file_path: str,
highlight_best: bool,
overwrite=True,
include_legend: bool = False,
hide_labels: bool = False,
):
assert os.path.exists(tsv_file_path)
file_dir, file_name = os.path.split(tsv_file_path)
with open(tsv_file_path, "r") as f:
tsv_hash = str(hashlib.md5(f.read().encode()).hexdigest())
df = pd.read_csv(tsv_file_path, sep="\t")
df = add_columns_to_df(df)
env_type_key = "env"
assert (
df[env_type_key] == df[env_type_key][0]
).all(), "env must be the same for all elements of df"
task_name = df[env_type_key][0]
del df[env_type_key]
df = df.sort_values(by=["exp_type", "seed"])
group_keys = ["exp_type"]
df_grouped = df.groupby(by=group_keys)
df_grouped_lists = df_grouped.agg(list)
# One sort index, based on the first metric
for metric_key in [
"reward",
# "success",
# "avg_ep_length",
]:
if not os.path.exists(file_dir):
print("IN WRONG DIRECTORY.")
else:
plots_dir = os.path.join(file_dir, "neurips21_plots", task_name)
os.makedirs(plots_dir, exist_ok=True)
box_save_path = os.path.join(
plots_dir,
"{}__box_{}_{}.pdf".format(
file_name.replace(".tsv", ""), task_name, metric_key,
),
)
if (not overwrite) and os.path.exists(box_save_path):
print(
"Plot {} exists and overwrite is `False`, skipping...".format(
box_save_path
)
)
continue
tsv_summary_dir = os.path.join(file_dir, "neurips21_summaries")
os.makedirs(tsv_summary_dir, exist_ok=True)
tsv_summary_save_path = os.path.join(
tsv_summary_dir, f"{metric_key}__all_results.tsv"
)
grouped_df_index = df_grouped_lists.index.to_frame(index=False)
method_keys = list(grouped_df_index["exp_type"])
            sort_index = [
                ind
                for _, ind in sorted(
                    (METHOD_ORDER.index(method_key), sort_ind)
                    for sort_ind, method_key in enumerate(method_keys)
                    if method_key in METHOD_ORDER
                )
            ]
colors = [
METHOD_TO_COLOR.get(method_keys[ind], (0, 0, 0),) for ind in sort_index
]
line_styles = None
line_markers = [
METHOD_TO_LINE_MARKER.get(method_keys[ind], "",) for ind in sort_index
]
sorted_multi_index = [
tuple(grouped_df_index.loc[ind, :]) for ind in sort_index
]
sorted_multi_index = [
x if len(x) != 1 else x[0] for x in sorted_multi_index
]
result_lens = {
multi_ind: len(df_grouped_lists.loc[multi_ind, metric_key])
for multi_ind in sorted_multi_index
}
print(result_lens)
print(sum(result_lens.values()))
points_list = [
list(
map(ast.literal_eval, df_grouped_lists.loc[multi_ind, metric_key],)
)
for multi_ind in sorted_multi_index
]
exp_to_ckpt_training_steps_lists = [
df_grouped_lists.loc[multi_ind, "train_steps"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in exp_to_ckpt_training_steps_lists)
exp_ind_to_ckpt_training_steps = [
ast.literal_eval(training_steps_list[0])
for training_steps_list in exp_to_ckpt_training_steps_lists
]
pretty_label_lists = [
df_grouped_lists.loc[multi_ind, "pretty_label"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in pretty_label_lists)
yticklabels = [l[0] for l in pretty_label_lists]
subset_size_to_bootstrap_points_list = []
subset_size_to_expected_mas_est_list = []
ckpt_to_bootstrap_points_list = []
ckpt_to_expected_mas_est_list = []
print("Starting expected max reward computations")
for i in range(len(points_list)):
print(f"Computing expected max {metric_key} for {yticklabels[i]}")
vals_per_ckpt_mat = np.array(
points_list[i]
) # each col corresponds to a checkpoint
training_steps_inds_to_skip = []
training_steps = exp_ind_to_ckpt_training_steps[i]
cache_path = os.path.join(
plots_dir, "cache", f"{tsv_hash}_{i}_{metric_key}.pkl.gz"
)
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
if os.path.exists(cache_path):
cache = compress_pickle.load(cache_path)
ckpt_to_expected_mas_est_list.append(
cache["ckpt_to_expected_mas_est"]
)
ckpt_to_bootstrap_points_list.append(
cache["ckpt_to_bootstrap_points"]
)
subset_size_to_expected_mas_est_list.append(
cache["subset_size_to_expected_mas_est"]
)
subset_size_to_bootstrap_points_list.append(
cache["subset_size_to_bootstrap_points"]
)
else:
for j in range(len(training_steps) - 1):
# Skip some weird cases where checkpoints were saved too closely
if (training_steps[j + 1] - training_steps[j]) / training_steps[
-1
] < 0.05:
training_steps_inds_to_skip.append(j)
                    ckpt_to_expected_mas_est_list.append(
                        {
                            steps_at_ckpt: expected_max_of_subset_statistic(
                                vals_per_ckpt_mat[:, j], m=num_hp_evals_for_steps_plot
                            )
                            for j, steps_at_ckpt in enumerate(training_steps)
                            if j not in training_steps_inds_to_skip
                        }
                    )
                    ckpt_to_bootstrap_points_list.append(
                        {
                            steps_at_ckpt: bootstrap_max_of_subset_statistic(
                                vals_per_ckpt_mat[:, j],
                                m=num_hp_evals_for_steps_plot,
                                reps=500,
                                seed=j,
                            )
                            for j, steps_at_ckpt in enumerate(training_steps)
                            if j not in training_steps_inds_to_skip
                        }
                    )
max_subset_size = len(points_list[i]) + 1 - 5
subset_size_to_expected_mas_est_list.append(
{
m: expected_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m
)
for m in range(1, max_subset_size)
}
)
subset_size_to_bootstrap_points_list.append(
{
m: bootstrap_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m, reps=500, seed=m
)
for m in range(1, max_subset_size)
}
)
cache = {}
cache["ckpt_to_expected_mas_est"] = ckpt_to_expected_mas_est_list[
-1
]
cache["ckpt_to_bootstrap_points"] = ckpt_to_bootstrap_points_list[
-1
]
cache[
"subset_size_to_expected_mas_est"
] = subset_size_to_expected_mas_est_list[-1]
cache[
"subset_size_to_bootstrap_points"
] = subset_size_to_bootstrap_points_list[-1]
compress_pickle.dump(cache, cache_path)
color_to_best_val_and_index = defaultdict(lambda: (-float("inf"), -1))
color_to_inds = defaultdict(lambda: [])
for ind, c0 in enumerate(colors):
color_to_inds[c0].append(ind)
final_y = list(sorted(ckpt_to_expected_mas_est_list[ind].items()))[-1][
1
]
if final_y > color_to_best_val_and_index[c0][0]:
color_to_best_val_and_index[c0] = (final_y, ind)
best_inds_to_highlight = set(
v[1] for v in color_to_best_val_and_index.values()
)
plot_max_hp_curves(
x_to_y_list=ckpt_to_expected_mas_est_list,
x_to_bootstrap_ys_list=ckpt_to_bootstrap_points_list,
method_labels=yticklabels,
xlabel=("Training Steps" if not hide_labels else ""),
ylabel=(
f"Expected {METRIC_TO_LABEL[metric_key]}" if not hide_labels else ""
),
colors=colors,
line_styles=line_styles,
line_markers=line_markers,
fig_size=(3 * 1.05, 3 * 1.05),
save_path=box_save_path.replace("_box_", "_train_steps_"),
put_legend_outside=True,
include_legend=include_legend,
title=(ENV_NAMES_TO_TITLE[task_name] if not hide_labels else ""),
performance_of_random_agent=TASK_TO_RANDOM_PERFORMANCE.get(
task_name, {}
).get(metric_key, None),
best_inds_to_highlight=best_inds_to_highlight
if highlight_best
else None,
)
def save_expected_rewards_tsv(
task_name: str,
x_to_y_list: Sequence[Dict[Union[int, float], float]],
method_labels: Sequence[str],
save_path: str,
grouped_inds_list: Sequence[Sequence[int]],
):
def all_nearly_equal(seq):
s = seq[0]
return all(abs(s - ss) / min(s, ss) < 0.01 for ss in seq)
with FileLock(save_path + ".lock"):
if os.path.exists(save_path):
df = pandas.read_csv(save_path, sep="\t")
assert list(df["method"]) == method_labels
else:
df = pandas.DataFrame(data={"method": method_labels})
assert all_nearly_equal(
[max(x_to_y.keys()) for x_to_y in x_to_y_list]
)
if task_name in df.columns:
del df[task_name]
values_at_end_of_training = [
x_to_y[max(x_to_y.keys())] for x_to_y in x_to_y_list
]
df[task_name] = values_at_end_of_training
df = df.reindex(["method"] + list(sorted(df.columns[1:])), axis=1)
df.to_csv(save_path, sep="\t", index=False, float_format="%.2f")
save_path = save_path.replace(".tsv", "_group.tsv")
with FileLock(save_path + ".lock"):
grouped_method_labels = [
method_labels[inds[0]] for inds in grouped_inds_list
]
if os.path.exists(save_path):
df = pandas.read_csv(save_path, sep="\t")
assert list(df["method"]) == grouped_method_labels
else:
df = pandas.DataFrame(data={"method": grouped_method_labels})
grouped_values_at_end_of_training = [
max(values_at_end_of_training[i] for i in inds)
for inds in grouped_inds_list
]
df[task_name] = grouped_values_at_end_of_training
df = df.reindex(["method"] + list(sorted(df.columns[1:])), axis=1)
df.to_csv(save_path, sep="\t", index=False, float_format="%.2f")
save_expected_rewards_tsv(
task_name=ENV_NAMES_TO_TITLE[task_name],
x_to_y_list=ckpt_to_expected_mas_est_list,
method_labels=yticklabels,
save_path=tsv_summary_save_path,
grouped_inds_list=[
color_to_inds[k] for k in sorted(color_to_inds.keys())
],
)
plot_max_hp_curves(
x_to_y_list=subset_size_to_expected_mas_est_list,
x_to_bootstrap_ys_list=subset_size_to_bootstrap_points_list,
method_labels=yticklabels,
xlabel=("$N$" if not hide_labels else ""),
ylabel=(
f"\emph{{Robust{METRIC_TO_LABEL[metric_key]}@$N$}}"
if not hide_labels
else ""
),
colors=colors,
line_styles=line_styles,
line_markers=line_markers,
fig_size=(3 * 1.05, 3 * 1.05),
save_path=box_save_path.replace("_box_", "_hpruns_"),
put_legend_outside=False,
include_legend=False,
title=(ENV_NAMES_TO_TITLE[task_name] if not hide_labels else ""),
best_inds_to_highlight=best_inds_to_highlight
if highlight_best
else None,
)
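# Usage sketch (hedged; the path is hypothetical): expects a TSV with one row
# per (exp_type, seed) run and columns including "env", "exp_type", "seed",
# "train_steps", "config_kwargs_str", and per-checkpoint metric lists ("reward").
# create_comparison_hp_plots_from_tsv(
#     num_hp_evals_for_steps_plot=10,
#     tsv_file_path="hp_runs/results.tsv",
#     highlight_best=True,
# )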
def run_info_to_pretty_label(run_info: Dict[str, Optional[Union[int, str, float]]]):
exp_type = run_info["exp_type"]
return EXPERIMENT_STR_TO_LABEL_DICT[exp_type]
| advisor-main | summarization_utils.py |
"""Defining the PPO loss for actor critic type models."""
import abc
import math
from typing import Dict, Union, Optional, Tuple, cast, Callable
import numpy as np
import torch
import torch.nn.functional as F
from stable_baselines3.common.running_mean_std import RunningMeanStd
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
)
from allenact.algorithms.onpolicy_sync.losses import A2C, PPO
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.utils.experiment_utils import Builder
from allenact.utils.tensor_utils import to_device_recursively
from gail_models import MiniGridDiscriminator
class AlphaScheduler(abc.ABC):
def next(self, step_count: int, *args, **kwargs):
raise NotImplementedError
class LinearAlphaScheduler(AlphaScheduler):
def __init__(self, start: float, end: float, total_steps: int):
self.start = start
self.end = end
self.total_steps = total_steps
def next(self, step_count: int, *args, **kwargs):
p = min(step_count / self.total_steps, 1)
return self.start * (1.0 - p) + self.end * p
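# Illustrative example (hypothetical values): LinearAlphaScheduler(4.0, 0.0, 300000)
# anneals alpha linearly from 4.0 at step 0 down to 0.0 at step 300000 and beyond.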
class AdvisorImitationStage(AbstractActorCriticLoss):
"""Implementation of the Advisor loss' stage 1 when main and auxiliary
actors are equally weighted."""
def loss( # type: ignore
self,
step_count: int,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
# Imitation calculation
observations = cast(Dict[str, torch.Tensor], batch["observations"])
if "expert_action" not in observations:
raise NotImplementedError(
"AdvisorImitationStage loss requires that an `expert_action` is given as input"
)
expert_actions_and_mask = observations["expert_action"]
assert expert_actions_and_mask.shape[-1] == 2
expert_actions_and_mask_reshaped = expert_actions_and_mask.view(-1, 2)
expert_actions = expert_actions_and_mask_reshaped[:, 0].view(
*expert_actions_and_mask.shape[:-1], 1
)
expert_actions_masks = (
expert_actions_and_mask_reshaped[:, 1]
.float()
.view(*expert_actions_and_mask.shape[:-1], 1)
)
expert_successes = expert_actions_masks.sum()
should_report_loss = expert_successes.item() != 0
main_expert_log_probs = actor_critic_output.distributions.log_prob(
cast(torch.LongTensor, expert_actions)
)
aux_expert_log_probs = actor_critic_output.extras[
"auxiliary_distributions"
].log_prob(cast(torch.LongTensor, expert_actions))
assert main_expert_log_probs.shape == aux_expert_log_probs.shape
assert (
main_expert_log_probs.shape[: len(expert_actions_masks.shape)]
== expert_actions_masks.shape
)
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
len_diff = len(main_expert_log_probs.shape) - len(expert_actions_masks.shape)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
aux_expert_ce_loss = -(
expert_actions_masks * aux_expert_log_probs
).sum() / torch.clamp(expert_successes, min=1)
main_expert_ce_loss = -(
expert_actions_masks * main_expert_log_probs
).sum() / torch.clamp(expert_successes, min=1)
total_loss = main_expert_ce_loss + aux_expert_ce_loss
return (
total_loss,
{
"main_expert_ce_loss": main_expert_ce_loss.item(),
"aux_expert_ce_loss": aux_expert_ce_loss.item(),
"total_loss": total_loss.item(),
}
if should_report_loss
else {},
)
class AdvisorWeightedStage(AbstractActorCriticLoss):
"""Implementation of the Advisor loss' second stage (simplest variant).
# Attributes
rl_loss: The RL loss to use, should be a loss object of type `PPO` or `A2C`
(or a `Builder` that when called returns such a loss object).
    alpha : Exponent to use when reweighting the expert cross entropy loss.
        Larger alpha means an (exponentially) smaller weight assigned to the cross entropy
        loss. E.g. if the weight with alpha=1 is 0.6, then with alpha=2 it is 0.6^2=0.36.
    bound : If the distance from the auxiliary policy to the expert policy is greater than
        this bound then the distance is set to 0.
    alpha_scheduler : An object of type `AlphaScheduler` which is called before computing the loss
        in order to get a new value for `alpha`.
    smooth_expert_weight_decay : If not None, will redistribute (smooth) the weight assigned to the cross
        entropy loss at a particular step over the following `smooth_expert_steps` steps. Values
        of `smooth_expert_weight_decay` near 1 will increase how evenly weight is assigned
        to future steps. Values near 0 will decrease how evenly this weight is distributed,
        with larger weight being given to steps less far into the future.
Here `smooth_expert_steps` is automatically defined from `smooth_expert_weight_decay` as detailed below.
smooth_expert_steps : The number of "future" steps over which to distribute the current steps weight.
This value is computed as `math.ceil(-math.log(1 + ((1 - r) / r) / 0.05) / math.log(r)) - 1` where
`r=smooth_expert_weight_decay`. This ensures that the weight is always distributed over at least
one additional step and that it is never distributed more than 20 steps into the future.
"""
def __init__(
self,
rl_loss: Optional[Union[Union[PPO, A2C], Builder[Union[PPO, A2C]]]],
fixed_alpha: Optional[float],
fixed_bound: Optional[float],
alpha_scheduler: AlphaScheduler = None,
smooth_expert_weight_decay: Optional[float] = None,
*args,
**kwargs
):
"""Initializer.
See the class documentation for parameter definitions not included below.
fixed_alpha: This fixed value of `alpha` to use. This value is *IGNORED* if
alpha_scheduler is not None.
fixed_bound: This fixed value of the `bound` to use.
"""
assert len(kwargs) == len(args) == 0
super().__init__(*args, **kwargs)
self.rl_loss: Union[PPO, A2C]
if isinstance(rl_loss, Builder):
self.rl_loss = rl_loss()
else:
self.rl_loss = rl_loss
self.alpha = fixed_alpha
self.bound = fixed_bound
self.alpha_scheduler = alpha_scheduler
self.smooth_expert_weight_decay = smooth_expert_weight_decay
assert smooth_expert_weight_decay is None or (
0 < smooth_expert_weight_decay < 1
), "`smooth_expert_weight_decay` must be between 0 and 1."
if smooth_expert_weight_decay is not None:
r = smooth_expert_weight_decay
self.smooth_expert_steps = (
math.ceil(-math.log(1 + ((1 - r) / r) / 0.05) / math.log(r)) - 1
)
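            # Worked example (illustrative): with smooth_expert_weight_decay=0.5,
            # ceil(-log(1 + (0.5 / 0.5) / 0.05) / log(0.5)) - 1 = ceil(log(21) / log(2)) - 1 = 4,
            # so a step's weight is spread over itself and the following 4 steps.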
def loss( # type: ignore
self,
step_count: int,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
if self.alpha_scheduler is not None:
self.alpha = self.alpha_scheduler.next(step_count=step_count)
# Imitation calculation
observations = cast(Dict[str, torch.Tensor], batch["observations"])
if "expert_action" not in observations:
            raise NotImplementedError(
                "AdvisorWeightedStage loss requires that an `expert_action` is given as input"
            )
expert_actions_and_mask = observations["expert_action"]
assert expert_actions_and_mask.shape[-1] == 2
expert_actions_and_mask_reshaped = expert_actions_and_mask.view(-1, 2)
expert_actions = expert_actions_and_mask_reshaped[:, 0].view(
*expert_actions_and_mask.shape[:-1], 1
)
expert_actions_masks = (
expert_actions_and_mask_reshaped[:, 1]
.float()
.view(*expert_actions_and_mask.shape[:-1], 1)
)
expert_successes = expert_actions_masks.sum()
if expert_successes.item() == 0:
return 0, {}
main_expert_neg_cross_entropy = actor_critic_output.distributions.log_prob(
cast(torch.LongTensor, expert_actions)
)
aux_expert_neg_cross_entropy = actor_critic_output.extras[
"auxiliary_distributions"
].log_prob(cast(torch.LongTensor, expert_actions))
# Add dimensions to `expert_actions_masks` on the right to allow for masking
# if necessary.
assert main_expert_neg_cross_entropy.shape == aux_expert_neg_cross_entropy.shape
assert (
main_expert_neg_cross_entropy.shape[: len(expert_actions_masks.shape)]
== expert_actions_masks.shape
)
len_diff = len(main_expert_neg_cross_entropy.shape) - len(
expert_actions_masks.shape
)
assert len_diff >= 0
expert_actions_masks = expert_actions_masks.view(
*expert_actions_masks.shape, *((1,) * len_diff)
)
aux_expert_ce_loss = -(
expert_actions_masks * aux_expert_neg_cross_entropy
).sum() / torch.clamp(expert_successes, min=1)
if self.bound > 0:
top_bound = math.log(self.bound)
else:
top_bound = -float("inf")
use_expert_weights = (
torch.exp(self.alpha * aux_expert_neg_cross_entropy)
* expert_actions_masks
* (aux_expert_neg_cross_entropy >= top_bound).float()
).detach()
if self.smooth_expert_weight_decay:
# Here we smooth `use_expert_weights` so that a weight p assigned
            # to a step at time t is redistributed to steps
# t, t+1, ..., t + self.smooth_expert_steps. This redistribution of
# weight p is not allowed to pass from one episode to another and so
# batch["masks"] must be used to prevent this.
            _, nsamplers, _ = expert_actions_masks.shape
start_shape = use_expert_weights.shape
use_expert_weights = use_expert_weights.view(-1, nsamplers)
padded_weights = F.pad(
use_expert_weights, [0, 0, self.smooth_expert_steps, 0]
)
masks = cast(torch.Tensor, batch["masks"]).view(-1, nsamplers)
padded_masks = F.pad(masks, [0, 0, self.smooth_expert_steps, 0])
divisors = torch.ones_like(masks) # Keep track of normalizing constants
for i in range(1, self.smooth_expert_steps + 1):
# Modify `use_expert_weights` so that weights are now computed as a
# weighted sum of previous weights.
masks = masks * padded_masks[self.smooth_expert_steps - i : -i, :]
use_expert_weights += (
self.smooth_expert_weight_decay ** i
) * padded_weights[self.smooth_expert_steps - i : -i, :]
divisors += masks * (self.smooth_expert_weight_decay ** i)
use_expert_weights /= divisors
use_expert_weights = use_expert_weights.view(*start_shape)
# noinspection PyTypeChecker
use_rl_weights = 1 - use_expert_weights
weighted_main_expert_ce_loss = -(
use_expert_weights * main_expert_neg_cross_entropy
).mean()
total_loss = aux_expert_ce_loss + weighted_main_expert_ce_loss
output_dict = {
"aux_expert_ce_loss": aux_expert_ce_loss.item(),
"weighted_main_expert_ce_loss": weighted_main_expert_ce_loss.item(),
"non_zero_weight": (use_expert_weights > 0).float().mean().item(),
"weight": use_expert_weights.mean().item(),
}
# RL Loss Computation
if self.rl_loss is not None:
rl_losses = self.rl_loss.loss_per_step(
step_count=step_count,
batch=batch,
actor_critic_output=actor_critic_output,
)
if isinstance(rl_losses, tuple):
rl_losses = rl_losses[0]
action_loss, rl_action_loss_weight = rl_losses["action"]
assert rl_action_loss_weight is None
entropy_loss, rl_entropy_loss_weight = rl_losses["entropy"]
def reweight(loss, w):
return loss if w is None else loss * w
weighted_action_loss = (
use_rl_weights * (reweight(action_loss, rl_action_loss_weight))
).mean()
weighted_entropy_loss = (
use_rl_weights * reweight(entropy_loss, rl_entropy_loss_weight)
).mean()
value_loss = rl_losses["value"][0].mean()
total_loss += (
(value_loss * rl_losses["value"][1])
+ weighted_action_loss
+ weighted_entropy_loss
)
output_dict.update(
{
"value_loss": value_loss.item(),
"weighted_action_loss": weighted_action_loss.item(),
"entropy_loss": entropy_loss.mean().item(),
}
)
output_dict["total_loss"] = total_loss.item()
return total_loss, output_dict
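# Illustrative sketch (editor's addition, not part of the original codebase):
# since `aux_expert_neg_cross_entropy` above equals log p_aux(expert action),
# the (unsmoothed) expert weight reduces to p_aux(expert action) ** alpha,
# zeroed out when p_aux falls below `bound`. The toy scalar version below makes
# that mapping explicit; the function name is ours and is defined only for
# illustration (`math` is imported at the top of this module).
def _illustrative_expert_weight(p_aux: float, alpha: float, bound: float = 0.0) -> float:
    log_p = math.log(p_aux)
    top_bound = math.log(bound) if bound > 0 else -float("inf")
    return math.exp(alpha * log_p) if log_p >= top_bound else 0.0
# E.g. _illustrative_expert_weight(0.9, alpha=4.0) ~= 0.656 (mostly imitate), while
# _illustrative_expert_weight(0.1, alpha=4.0) ~= 1e-4 (weight shifts to the RL loss).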
class MiniGridOffPolicyAdvisorLoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(
self,
fixed_alpha: Optional[float],
fixed_bound: Optional[float],
total_episodes_in_epoch: Optional[int] = None,
alpha_scheduler: AlphaScheduler = None,
smooth_expert_weight_decay: Optional[float] = None,
*args,
**kwargs
):
super().__init__()
self.advisor_loss = AdvisorWeightedStage(
rl_loss=None,
fixed_alpha=fixed_alpha,
fixed_bound=fixed_bound,
alpha_scheduler=alpha_scheduler,
smooth_expert_weight_decay=smooth_expert_weight_decay,
*args,
**kwargs
)
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
step_count: int,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
*args,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts = batch["minigrid_ego_image"].shape[:2]
# Initialize Memory if empty
if len(memory) == 0:
spec = model.recurrent_memory_specification
for key in spec:
dims_template, dtype = spec[key]
# get sampler_dim and all_dims from dims_template (and nrollouts)
dim_names = [d[0] for d in dims_template]
sampler_dim = dim_names.index("sampler")
all_dims = [d[1] for d in dims_template]
all_dims[sampler_dim] = nrollouts
memory.check_append(
key=key,
tensor=torch.zeros(
*all_dims,
dtype=dtype,
device=cast(torch.Tensor, batch["minigrid_ego_image"]).device
),
sampler_dim=sampler_dim,
)
# Forward data (through the actor and critic)
ac_out, memory = model.forward(
observations=batch,
memory=memory,
prev_actions=None, # type:ignore
masks=cast(torch.FloatTensor, batch["masks"]),
)
total_loss, losses_dict = self.advisor_loss.loss(
step_count=step_count,
batch={
"observations": {
"expert_action": torch.cat(
(
batch["expert_action"].view(rollout_len, nrollouts, 1),
torch.ones(rollout_len, nrollouts, 1, dtype=torch.int64).to(
batch["expert_action"].device
),
),
dim=-1,
)
}
},
actor_critic_output=ac_out,
)
info = {"offpolicy_" + key: val for key, val in losses_dict.items()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return total_loss, info, memory, rollout_len * nrollouts
class GAILDiscriminatorLoss(AbstractActorCriticLoss):
def __init__(
self,
data_iterator_builder,
discriminator_observation_uuid: str,
gradient_penalty_coeff: int = 10,
):
super().__init__()
self.data_iterator_builder = data_iterator_builder
self.data_iterator = data_iterator_builder()
self.discriminator_observation_uuid = discriminator_observation_uuid
self.gradient_penalty_coeff = gradient_penalty_coeff
def get_next_demo_batch(self):
try:
expert_batch = next(self.data_iterator)
except StopIteration:
self.data_iterator = self.data_iterator_builder()
expert_batch = next(self.data_iterator)
return expert_batch
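    # Hedged usage sketch (editor's illustration, not from the repo):
    # `data_iterator_builder` can be any zero-argument callable returning a fresh
    # iterator over expert demo batches; `get_next_demo_batch` restarts it once
    # the demos are exhausted. E.g. (with an assumed `expert_demo_batches` list):
    #
    #     GAILDiscriminatorLoss(
    #         data_iterator_builder=lambda: iter(expert_demo_batches),
    #         discriminator_observation_uuid="minigrid_ego_image",
    #     )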
def loss( # type: ignore
self,
step_count: int,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
expert_batch = cast(Dict[str, torch.Tensor], self.get_next_demo_batch())
device = batch["observations"][self.discriminator_observation_uuid].device
expert_batch = to_device_recursively(expert_batch, device=device, inplace=True)
rollout_len, nrollouts = expert_batch[
self.discriminator_observation_uuid
].shape[:2]
# expert_batch == offpolicy
expert_actions = expert_batch["expert_action"]
expert_masks = expert_batch["masks"]
# batch == onpolicy
policy_observations = cast(Dict[str, torch.Tensor], batch["observations"])
policy_actions = batch["actions"]
policy_masks = batch["masks"]
assert (
expert_batch[self.discriminator_observation_uuid].shape
== policy_observations[self.discriminator_observation_uuid].shape
)
assert expert_actions.shape == policy_actions.shape
assert expert_masks.shape == policy_masks.shape
assert expert_actions.shape[:2] == (rollout_len, nrollouts)
discriminator_network: MiniGridDiscriminator = actor_critic_output.extras[
"discriminator"
]
expert_logits = discriminator_network(
observations=expert_batch, actions=expert_actions, masks=expert_masks
)
expert_loss = F.binary_cross_entropy_with_logits(
expert_logits, torch.ones(expert_logits.size()).to(device)
)
policy_logits = discriminator_network(
observations=policy_observations, actions=policy_actions, masks=policy_masks
)
policy_loss = F.binary_cross_entropy_with_logits(
policy_logits, torch.zeros(policy_logits.size()).to(device)
)
gradient_penalty = discriminator_network.compute_grad_pen(
expert_observations=expert_batch,
expert_actions=expert_actions,
policy_observations=policy_observations,
policy_actions=policy_actions,
expert_masks=expert_masks,
policy_masks=policy_masks,
)
return (
expert_loss + policy_loss + self.gradient_penalty_coeff * gradient_penalty,
{
"gail_discriminator": (expert_loss + policy_loss).item(),
"gail_gradient_penalty": gradient_penalty.item(),
},
)
def _compute_returns_and_adv(
rewards, next_value, use_gae, gamma, tau, value_preds, masks
):
returns = torch.zeros_like(value_preds)
if use_gae:
assert torch.all(torch.eq(value_preds[-1], next_value))
gae = 0
for step in reversed(range(rewards.size(0))):
delta = (
rewards[step]
+ gamma * value_preds[step + 1] * masks[step + 1]
- value_preds[step]
)
gae = delta + gamma * tau * masks[step + 1] * gae
returns[step] = gae + value_preds[step]
else:
returns[-1] = next_value
for step in reversed(range(rewards.size(0))):
returns[step] = returns[step + 1] * gamma * masks[step + 1] + rewards[step]
advantages = returns[:-1] - value_preds[:-1]
normalized_advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
return returns, normalized_advantages
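# Worked micro-example (illustrative, not from the original source): with
# gamma=0.99, tau=0.95, a single sampler, rewards [1, 1],
# value_preds [0.5, 0.5, 0.5] (the last entry equals `next_value`) and masks
# all 1, the GAE branch gives
#   delta_1 = 1 + 0.99 * 0.5 - 0.5 = 0.995   ->  gae_1 = 0.995
#   delta_0 = 0.995                          ->  gae_0 = 0.995 + 0.99 * 0.95 * 0.995 ~= 1.931
# so returns ~= [2.431, 1.495, 0.0] (the final slot is never written) and
# advantages = returns[:-1] - value_preds[:-1] ~= [1.931, 0.995] before
# normalization to zero mean / unit variance.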
class GAILPPO(AbstractActorCriticLoss):
def __init__(
self,
clip_param: float,
value_loss_coef: float,
entropy_coef: float,
gamma: float,
use_gae: bool,
gae_lambda: float,
nrollouts: int,
rollout_len: int,
use_clipped_value_loss=True,
clip_decay: Optional[Callable[[int], float]] = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.clip_param = clip_param
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.use_clipped_value_loss = use_clipped_value_loss
self.clip_decay = clip_decay if clip_decay is not None else (lambda x: 1.0)
self.gamma = gamma
self.use_gae = use_gae
self.gae_lambda = gae_lambda
self.running_means_std_of_returns = RunningMeanStd(shape=(1,))
self.nrollouts = nrollouts
self.rollout_len = rollout_len
@staticmethod
def _unflatten_helper(t: int, n: int, tensor: torch.Tensor) -> torch.Tensor:
"""Given a tensor of size (t*n, ...) 'unflatten' it to size (t, n, ..).
# Parameters
t : first dimension of desired tensor.
n : second dimension of desired tensor.
tensor : target tensor to be unflattened.
# Returns
Unflattened tensor of size (t, n, ...)
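        Example (added for clarity): `GAILPPO._unflatten_helper(2, 3, torch.zeros(6, 5)).shape`
        is `torch.Size([2, 3, 5])`.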
"""
return tensor.view(t, n, *tensor.size()[1:])
def loss_per_step(
self,
step_count: int,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
actor_critic_output: ActorCriticOutput[CategoricalDistr],
) -> Dict[str, Tuple[torch.Tensor, Optional[float]]]:
# Based on function with the same name in `PPO` (ppo.py)
        # Rewards are model-based; hence returns and advantages are recalculated.
        # Since next_value_pred of the (N+1)-th observation isn't available, we reduce the time
        # horizon by one and calculate standard PPO losses. (Hence the `[:-1]` in various places.)
actions = cast(torch.LongTensor, batch["actions"])
# Calculate rewards
observations = batch["observations"]
discriminator = actor_critic_output.extras["discriminator"]
unnorm_rewards = discriminator(
observations=observations, actions=actions, masks=batch["masks"]
).detach()
rewards = unnorm_rewards / (
math.sqrt(float(self.running_means_std_of_returns.var[0])) + 1e-8
)
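        # The discriminator's logits serve as model-based rewards, scaled by the
        # running standard deviation of the recomputed returns (updated below via
        # `self.running_means_std_of_returns.update`).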
# computing returns expects data to be fed in a (rollout_len, nrollouts, 1) format
# further reducing rewards' horizon by 1 so that batch_values is sufficient without needing
# the next predicted value (exposed only at the level of the engine).
rewards = rewards[:-1]
batch_values = batch["values"]
batch_masks = batch["masks"]
# computing returns and advantages based on model based reward predictions
next_value = batch_values[-1]
returns, norm_adv_targ = _compute_returns_and_adv(
rewards=rewards,
next_value=next_value,
use_gae=self.use_gae,
gamma=self.gamma,
tau=self.gae_lambda,
value_preds=batch_values,
masks=batch_masks,
)
self.running_means_std_of_returns.update(returns.view(-1).cpu().numpy())
# reducing time horizon by one
values = actor_critic_output.values[:-1]
dist_entropy = actor_critic_output.distributions.entropy()[:-1]
action_log_probs = actor_critic_output.distributions.log_prob(actions)[:-1]
batch_old_action_log_probs = batch["old_action_log_probs"][:-1]
batch_values = batch_values[:-1]
returns = returns[:-1]
# Everything used next is (rollout_len - 1, nrollouts, 1)
# action_log_probs
# batch_old_action_log_probs
# norm_adv_targ
# values
# batch_values
# returns
def add_trailing_dims(t: torch.Tensor):
assert len(t.shape) <= len(batch["norm_adv_targ"].shape)
return t.view(
t.shape + ((1,) * (len(batch["norm_adv_targ"].shape) - len(t.shape)))
)
dist_entropy = add_trailing_dims(dist_entropy)
clip_param = self.clip_param * self.clip_decay(step_count)
# Standard PPO loss components (but based on model based rewards instead of env based ones)
ratio = torch.exp(action_log_probs - batch_old_action_log_probs)
ratio = add_trailing_dims(ratio)
surr1 = ratio * norm_adv_targ
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * norm_adv_targ
action_loss = -torch.min(surr1, surr2)
if self.use_clipped_value_loss:
value_pred_clipped = batch_values + (values - batch_values).clamp(
-clip_param, clip_param
)
value_losses = (values - returns).pow(2)
value_losses_clipped = (value_pred_clipped - returns).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
else:
value_loss = 0.5 * (cast(torch.FloatTensor, returns) - values).pow(2)
# noinspection PyUnresolvedReferences
        assert (
            value_loss.shape
            == action_loss.shape
            == (self.rollout_len - 1, self.nrollouts, 1)
        )
return {
"value": (value_loss, self.value_loss_coef),
"action": (action_loss, None),
"entropy": (dist_entropy.mul_(-1.0), self.entropy_coef), # type: ignore
}
def loss( # type: ignore
self,
step_count: int,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
actor_critic_output: ActorCriticOutput[CategoricalDistr],
*args,
**kwargs
):
# Same as `loss` in `PPO` (ppo.py)
losses_per_step = self.loss_per_step(
step_count=step_count, batch=batch, actor_critic_output=actor_critic_output,
)
        if isinstance(losses_per_step, tuple):
            losses_per_step = losses_per_step[0]
losses = {
key: (loss.mean(), weight)
for (key, (loss, weight)) in losses_per_step.items()
}
total_loss = sum(
loss * weight if weight is not None else loss
for loss, weight in losses.values()
)
        return (
            total_loss,
            {key: loss.item() for key, (loss, _) in losses.items()},
        )
| advisor-main | advisor_losses.py |
import os
from pathlib import Path
MINIGRID_EXPERT_TRAJECTORIES_DIR = os.path.abspath(
os.path.join(os.path.dirname(Path(__file__)), "minigrid_data", "minigrid_demos")
)
MINIGRID_ENV_NAMES_SUPPORTED = (
"CrossingS25N10", # LavaCrossing (S25, N10)
"WallCrossingS25N10", # WallCrossing (S25, N10)
"AskForHelpSimpleCrossing", # WC Faulty Switch (S15, N7)
"WallCrossingCorruptExpertS25N10", # WC Corrupt (S25, N10)
"AskForHelpLavaCrossingSmall", # LC Faulty Switch (S9, N4)
"AskForHelpSimpleCrossingOnce", # WC Once Switch (S15, N7)
"AskForHelpLavaCrossingOnce", # LC Once Switch (S15, N7)
"LavaCrossingCorruptExpertS15N7", # LC Corrupt (S15, N7)
)
ENV_NAMES_TO_TITLE = {
"CrossingS25N10": r"$\textsc{LavaCrossing (LC)}$",
"WallCrossingS25N10": r"$\textsc{WallCrossing (WC)}$",
"AskForHelpSimpleCrossing": r"$\textsc{WC Faulty Switch}$",
"WallCrossingCorruptExpertS25N10": r"$\textsc{WC Corrupt}$",
"AskForHelpLavaCrossingSmall": r"$\textsc{LC Faulty Switch}$",
"AskForHelpSimpleCrossingOnce": r"$\textsc{WC Once Switch}$",
"AskForHelpLavaCrossingOnce": r"$\textsc{LC Once Switch}$",
"LavaCrossingCorruptExpertS15N7": r"$\textsc{LC Corrupt}$",
"PoisonedDoors": r"$\textsc{PoisonedDoors}$",
}
def demos_exist_for_env(env_name: str):
if env_name.lower().strip() == "poisoneddoors":
return True
return os.path.exists(
os.path.join(MINIGRID_EXPERT_TRAJECTORIES_DIR, f"MiniGrid-{env_name}-v0.pkl")
)
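# Example (illustrative): demos are stored as pickled trajectory files named
# f"MiniGrid-{env_name}-v0.pkl" under MINIGRID_EXPERT_TRAJECTORIES_DIR, so
# `demos_exist_for_env("CrossingS25N10")` checks for
# "minigrid_data/minigrid_demos/MiniGrid-CrossingS25N10-v0.pkl", while
# "PoisonedDoors" is special-cased to always return True.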
| advisor-main | minigrid_constants.py |
import os
from pathlib import Path
import matplotlib.pyplot as plt
from allenact.utils.misc_utils import TABLEAU10_RGB
ADVISOR_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
NICE_COLORS12_RGB = TABLEAU10_RGB + (
(31, 119, 180),
(255, 127, 14),
(44, 160, 44),
(214, 39, 40),
(148, 103, 189),
(140, 86, 75),
(227, 119, 194),
(127, 127, 127),
(188, 189, 34),
(23, 190, 207),
(128, 128, 128),
)
plt.rc("font", **{"family": "serif", "serif": ["CMU"]})
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{amsmath}")
FIXED_ADVISOR_STR = r"ADV"
EXPERIMENT_STR_TO_LABEL_DICT = {
"dagger_then_ppo": r"$\dagger \to$ PPO",
"bc_then_ppo": r"BC$ \to$ PPO",
"bc": r"BC",
"dagger": r"DAgger $(\dagger)$",
"ppo": r"PPO",
"ppo_with_offpolicy": r"BC$^{\text{demo}} +$ PPO",
"pure_offpolicy": r"BC$^{\text{demo}}$",
"gail": r"GAIL",
"bc_teacher_forcing": r"BC$^{\text{tf}=1}$",
"bc_teacher_forcing_then_ppo": r"BC$^{\text{tf}=1} \to$ PPO",
"bc_with_ppo": r"BC$+$PPO (static)",
# ADVISOR
"advisor": r"{}".format(FIXED_ADVISOR_STR),
"dagger_then_advisor": r"$\dagger \to$ {}".format(FIXED_ADVISOR_STR),
"ppo_with_offpolicy_advisor": r"ADV$^{\text{demo}} +$ PPO",
"bc_teacher_forcing_then_advisor": r"BC$^{\text{tf}=1} \to$ ADV",
}
TYPE_TO_EXPERIMENT_STRS = {
"rl": ["ppo"],
"rl+il": [
"bc_with_ppo",
"bc_then_ppo",
"dagger_then_ppo",
"bc_teacher_forcing_then_ppo",
],
"il": ["bc", "dagger", "bc_teacher_forcing"],
"demos": ["pure_offpolicy", "ppo_with_offpolicy", "gail"],
"advisor": [
"advisor",
"bc_teacher_forcing_then_advisor",
"dagger_then_advisor",
"ppo_with_offpolicy_advisor",
],
}
EXPERIMENT_STR_TO_TYPE = {
v: k for k in TYPE_TO_EXPERIMENT_STRS for v in TYPE_TO_EXPERIMENT_STRS[k]
}
METHOD_ORDER = [
v
for k in ["rl", "il", "rl+il", "demos", "advisor"]
for v in TYPE_TO_EXPERIMENT_STRS[k]
]
METHOD_TO_COLOR = {}
METHOD_TO_LINE_STYLE = {}
METHOD_TO_LINE_MARKER = {}
NICE_MARKERS = ("", "|", "x", "^")
def _init_method_to_dictionaries():
for type_ind, type in enumerate(TYPE_TO_EXPERIMENT_STRS):
# COLOR (based on type, "rl", "rl+il", etc)
n = len(NICE_COLORS12_RGB)
for method_ind, method in enumerate(TYPE_TO_EXPERIMENT_STRS[type]):
METHOD_TO_COLOR[method] = NICE_COLORS12_RGB[
(type_ind + (type_ind // n)) % n
]
# STYLE
METHOD_TO_LINE_STYLE[method] = ["solid", "dashed", "dashdot"][
method_ind % 3
]
# MARKER
METHOD_TO_LINE_MARKER[method] = NICE_MARKERS[method_ind % len(NICE_MARKERS)]
_init_method_to_dictionaries()
| advisor-main | advisor_constants.py |
from typing import Callable, Dict, Optional, Any, cast
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
from torch import nn, autograd
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import Memory
from allenact.embodiedai.models.basic_models import LinearActorCritic, RNNStateEncoder
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN
class MiniGridDiscriminator(nn.Module):
def __init__(
self,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
num_actions: int,
object_embedding_dim: int,
action_embedding_dim: int,
classifier_hidden_dim: int,
):
super(MiniGridDiscriminator, self).__init__()
self.object_embedding_dim = object_embedding_dim
self.action_embedding_dim = action_embedding_dim
# Same dimensionality used for colors and states
self.color_embedding_dim = object_embedding_dim
self.state_embedding_dim = object_embedding_dim
# Input shapes
vis_input_shape = observation_space["minigrid_ego_image"].shape
agent_view_x, agent_view_y, view_channels = vis_input_shape
assert agent_view_x == agent_view_y
self.agent_view = agent_view_x
self.view_channels = view_channels
        assert (np.array(vis_input_shape[:2]) >= 7).all(), (
            "MiniGridDiscriminator requires that the input size be at least 7x7."
        )
# Object, Color, State --> Embeddings
self.object_embedding = nn.Embedding(
num_embeddings=num_objects, embedding_dim=self.object_embedding_dim
)
self.color_embedding = nn.Embedding(
num_embeddings=num_colors, embedding_dim=self.color_embedding_dim
)
self.state_embedding = nn.Embedding(
num_embeddings=num_states, embedding_dim=self.state_embedding_dim
)
# Same dimensionality used for actions
self.action_embedding = nn.Embedding(
num_embeddings=num_actions, embedding_dim=self.action_embedding_dim
)
# Classifier
classifier_input_dim = (
agent_view_x
* agent_view_y
* (
self.object_embedding_dim
+ self.color_embedding_dim
+ self.state_embedding_dim
)
+ self.action_embedding_dim
)
self.classifier = nn.Sequential(
nn.Linear(classifier_input_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, 1),
)
# self.returns = None
# self.ret_rms = RunningMeanStd(shape=())
self.train()
def compute_grad_pen(
self,
expert_observations,
expert_actions,
policy_observations,
policy_actions,
pass_grad_through_encoder=False,
expert_masks=None,
policy_masks=None,
):
alpha = torch.rand(*expert_observations["minigrid_ego_image"].shape[:2], 1)
with torch.set_grad_enabled(pass_grad_through_encoder):
encoded_expert_data = self.encode_minigrid_observations_actions(
expert_observations, expert_actions, masks=expert_masks
)
encoded_policy_data = self.encode_minigrid_observations_actions(
policy_observations, policy_actions, masks=policy_masks
)
alpha = alpha.expand_as(encoded_expert_data).to(encoded_expert_data.device)
mixup_data = alpha * encoded_expert_data + (1 - alpha) * encoded_policy_data
mixup_data.requires_grad = True
disc = self.classifier(mixup_data)
ones = torch.ones(disc.size()).to(disc.device)
grad = autograd.grad(
outputs=disc,
inputs=mixup_data,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
grad_pen = (
(grad.norm(2, dim=1) - 1).pow(2).mean()
) # Scaling factor moved to the loss level
return grad_pen
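    # The penalty above is a WGAN-GP-style regularizer,
    #     E[(||grad_x D(x_hat)||_2 - 1)^2],  x_hat = a * enc(expert) + (1 - a) * enc(policy),
    # computed on encodings mixed with a per-timestep uniform coefficient `a`;
    # the `gradient_penalty_coeff` scaling is applied in `GAILDiscriminatorLoss`.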
def encode_minigrid_observations_actions(
self, observations: Dict[str, Any], actions, masks: Optional[torch.Tensor],
):
minigrid_ego_image = observations["minigrid_ego_image"]
rollout_len, nrollouts, nrow, ncol, nchannels = minigrid_ego_image.shape
minigrid_ego_image = minigrid_ego_image.view(
rollout_len * nrollouts, nrow, ncol, nchannels
)
assert nrow == ncol == self.agent_view
assert nchannels == self.view_channels == 3
ego_object_embeds = self.object_embedding(minigrid_ego_image[:, :, :, 0].long())
ego_color_embeds = self.color_embedding(minigrid_ego_image[:, :, :, 1].long())
ego_state_embeds = self.state_embedding(minigrid_ego_image[:, :, :, 2].long())
ego_embeds = torch.cat(
(ego_object_embeds, ego_color_embeds, ego_state_embeds), dim=-1
)
action_embeds = self.action_embedding(actions.long())
output_embeds = torch.cat(
(
ego_embeds.view(rollout_len, nrollouts, -1),
action_embeds.view(rollout_len, nrollouts, -1),
),
dim=-1,
)
return output_embeds
def forward(
self,
observations: Dict[str, Any],
actions,
masks: Optional[torch.Tensor] = None,
):
return self.classifier(
self.encode_minigrid_observations_actions(
observations=observations, actions=actions, masks=masks
)
)
class MiniGridDiscriminatorRNN(MiniGridDiscriminator):
def __init__(
self,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
num_actions: int,
object_embedding_dim: int,
action_embedding_dim: int,
classifier_hidden_dim: int,
rnn_output_dim: int = 256,
):
super().__init__(
**prepare_locals_for_super(
{k: v for k, v in locals().items() if k != "rnn_output_dim"}
)
)
# Classifier
input_dim = (
self.agent_view
* self.agent_view
* (
self.object_embedding_dim
+ self.color_embedding_dim
+ self.state_embedding_dim
)
+ self.action_embedding_dim
)
self.state_encoder = RNNStateEncoder(
input_size=input_dim, hidden_size=rnn_output_dim
)
self.start_hidden_state = nn.Parameter(
torch.zeros(self.state_encoder.num_recurrent_layers, 1, rnn_output_dim),
requires_grad=True,
)
self.classifier = nn.Sequential(
nn.Linear(rnn_output_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, 1),
)
self.train()
def encode_minigrid_observations_actions(
self, observations: Dict[str, Any], actions, masks: Optional[torch.Tensor],
):
minigrid_ego_image = observations["minigrid_ego_image"]
rollout_len, nrollouts, nrow, ncol, nchannels = minigrid_ego_image.shape
minigrid_ego_image = minigrid_ego_image.view(
rollout_len * nrollouts, nrow, ncol, nchannels
)
assert nrow == ncol == self.agent_view
assert nchannels == self.view_channels == 3
ego_object_embeds = self.object_embedding(minigrid_ego_image[:, :, :, 0].long())
ego_color_embeds = self.color_embedding(minigrid_ego_image[:, :, :, 1].long())
ego_state_embeds = self.state_embedding(minigrid_ego_image[:, :, :, 2].long())
ego_embeds = torch.cat(
(ego_object_embeds, ego_color_embeds, ego_state_embeds), dim=-1
)
action_embeds = self.action_embedding(actions.long())
output_embeds = torch.cat(
(
ego_embeds.view(rollout_len, nrollouts, -1),
action_embeds.view(rollout_len, nrollouts, -1),
),
dim=-1,
)
out, hidden = self.state_encoder.forward(
x=cast(torch.FloatTensor, output_embeds),
hidden_states=cast(
torch.FloatTensor, self.start_hidden_state.repeat(1, nrollouts, 1)
),
masks=cast(torch.FloatTensor, masks),
)
return out
class MiniGridSimpleConvRNNWithDiscriminator(MiniGridSimpleConvRNN):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
action_embedding_dim: int = 64,
classifier_hidden_dim: int = 128,
hidden_size=512,
num_layers=1,
rnn_type="GRU",
head_type: Callable[
..., ActorCriticModel[CategoricalDistr]
] = LinearActorCritic,
recurrent_discriminator: bool = False,
):
super().__init__(
action_space=action_space,
observation_space=observation_space,
num_objects=num_objects,
num_colors=num_colors,
num_states=num_states,
object_embedding_dim=object_embedding_dim,
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
head_type=head_type,
)
discriminator_class = (
MiniGridDiscriminatorRNN
if recurrent_discriminator
else MiniGridDiscriminator
)
self.discriminator = discriminator_class(
observation_space,
num_objects,
num_colors,
num_states,
action_space.n,
object_embedding_dim,
action_embedding_dim,
classifier_hidden_dim,
)
def forward(
self,
observations: Dict[str, Any],
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
):
out, memory = super(MiniGridSimpleConvRNNWithDiscriminator, self).forward(
observations=observations,
memory=memory,
prev_actions=prev_actions,
masks=masks,
)
out.extras["discriminator"] = self.discriminator
return out, memory
class PoisonedDoorsDiscriminatorRNN(nn.Module):
def __init__(
self,
input_uuid: str,
num_action_embeddings: int,
num_observation_embeddings: int,
embedding_dim: int,
classifier_hidden_dim: int,
rnn_output_dim: int = 256,
):
super().__init__()
self.input_uuid = input_uuid
self.observation_embedding = nn.Embedding(
num_embeddings=num_observation_embeddings, embedding_dim=embedding_dim
)
self.action_embedding = nn.Embedding(
num_embeddings=num_action_embeddings, embedding_dim=embedding_dim
)
# Classifier
self.state_encoder = RNNStateEncoder(
input_size=2 * embedding_dim, hidden_size=rnn_output_dim
)
self.start_hidden_state = nn.Parameter(
torch.zeros(self.state_encoder.num_recurrent_layers, 1, rnn_output_dim),
requires_grad=True,
)
self.classifier = nn.Sequential(
nn.Linear(rnn_output_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, classifier_hidden_dim),
nn.Tanh(),
nn.Linear(classifier_hidden_dim, 1),
)
self.train()
def compute_grad_pen(
self,
expert_observations,
expert_actions,
policy_observations,
policy_actions,
pass_grad_through_encoder=False,
expert_masks=None,
policy_masks=None,
):
alpha = torch.rand(*expert_observations[self.input_uuid].shape[:2], 1)
with torch.set_grad_enabled(pass_grad_through_encoder):
encoded_expert_data = self.encode_observations_and_actions(
observations=expert_observations,
actions=expert_actions,
masks=expert_masks,
)
encoded_policy_data = self.encode_observations_and_actions(
observations=policy_observations,
actions=policy_actions,
masks=policy_masks,
)
alpha = alpha.expand_as(encoded_expert_data).to(encoded_expert_data.device)
mixup_data = alpha * encoded_expert_data + (1 - alpha) * encoded_policy_data
mixup_data.requires_grad = True
disc = self.classifier(mixup_data)
ones = torch.ones(disc.size()).to(disc.device)
grad = autograd.grad(
outputs=disc,
inputs=mixup_data,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
grad_pen = (
(grad.norm(2, dim=1) - 1).pow(2).mean()
) # Scaling factor moved to the loss level
return grad_pen
def encode_observations_and_actions(
self, observations: Dict[str, Any], actions, masks: Optional[torch.Tensor],
):
rollout_len, nrollouts = actions.shape
obs_embed = self.observation_embedding(
observations[self.input_uuid].view(rollout_len, nrollouts)
)
action_embed = self.action_embedding(actions)
x = torch.cat((obs_embed, action_embed), dim=-1)
assert len(x.shape) == 3
out, hidden = self.state_encoder.forward(
x=cast(torch.FloatTensor, x),
hidden_states=cast(
torch.FloatTensor, self.start_hidden_state.repeat(1, x.shape[1], 1)
),
masks=cast(torch.FloatTensor, masks),
)
return out
def forward(
self,
observations: Dict[str, Any],
actions,
masks: Optional[torch.Tensor] = None,
):
return self.classifier(
self.encode_observations_and_actions(
observations=observations, actions=actions, masks=masks
)
)
| advisor-main | gail_models.py |
import glob
import json
import math
import os
import shutil
import time
from typing import Optional
import torch
import torch.multiprocessing as mp
from allenact.algorithms.onpolicy_sync.runner import OnPolicyRunner
from allenact.main import get_args, init_logging, load_config
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
from projects.advisor.lighthouse_experiments.base import (
BaseLightHouseExperimentConfig,
LighthouseExperimentParams,
)
mp = mp.get_context("forkserver")
import queue
from setproctitle import setproctitle as ptitle
import pandas as pd
import numpy as np
from allenact.utils.system import get_logger, update_log_level
def iteratively_run_lighthouse_experiments(
process_id: int,
gpu_id: Optional[int],
args,
world_dim: int,
world_radius: int,
input_queue: mp.Queue,
output_queue: mp.Queue,
log_level: str,
test_seed_offset: Optional[int] = None,
):
"""Iteratively train and test lighthouse models with different levels of
supervision.
    This function is meant to be run as a subprocess. It iteratively samples
    experiment definitions from `input_queue` (e.g. the agent's view radius,
    the expert's view radius, and the random seed). It then runs each such
    experiment and adds its results to `output_queue`, which is collated by
    the main process.
    # Parameters
process_id : This process' id.
gpu_id : The gpu to run experiments on.
args : Command line arguments specifying the experiment config to run. E.g.
`projects.advisor.lighthouse_experiments.advisor`. Details of this
experiment config, for instance the agent's `view_radius` config are modified
by this function based on the values from the `input_queue`.
world_dim : The world dimension used in all experiments.
world_radius : The world radius used in all experiments.
input_queue : The queue from which experiment details are taken.
output_queue : The queue into which the results of running an experiment are saved.
test_seed_offset : If not `None`, used to redefine the `TEST_SEED_OFFSET` class constant
associated with the experiment config.
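    Each item read from `input_queue` is a `(view_radius, expert_view_radius, seed, lr)`
    tuple, e.g. `(3, 7, 0, None)`; an `lr` of `None` means the experiment config's
    default learning rate is used.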
"""
ptitle("({}) Create Im. Mat. Runner".format(process_id))
init_logging(log_level)
def log_info(msg):
update_log_level(logger=get_logger(), human_log_level="info")
get_logger().info(msg)
update_log_level(logger=get_logger(), human_log_level=log_level)
try:
while True:
# Sample a new set of values defining the new experiment to run
view_radius, expert_view_radius, seed, lr = input_queue.get(timeout=1)
optimal_ave_ep_length = LightHouseEnvironment.optimal_ave_ep_length(
world_dim=world_dim, world_radius=world_radius, view_radius=view_radius
)
args.config_kwargs = json.dumps(
{
"GPU_ID": gpu_id,
"TEST_SEED_OFFSET": test_seed_offset
if test_seed_offset is not None
else 0,
"LR": lr if lr is not None else LighthouseExperimentParams().LR,
"VIEW_RADIUS": view_radius,
"EXPERT_VIEW_RADIUS": expert_view_radius,
}
)
# Grab the experiment config and set its GPU_ID.
cfg: BaseLightHouseExperimentConfig
cfg, _ = load_config(args) # type: ignore
log_info(
f"Running with (view, expert view, seed, lr) ="
f" ({view_radius}, {expert_view_radius}, {seed}, {cfg.exp_params.LR:.3g})."
f" Target optimal ep length: {optimal_ave_ep_length}."
)
assert args.seed is None
# Train the agent based on the experiment config.
runner = OnPolicyRunner(
config=cfg,
output_dir=args.output_dir,
loaded_config_src_files=None,
seed=seed,
mode="train",
mp_ctx=mp,
disable_tensorboard=True,
disable_config_saving=True,
)
train_start_time_str = runner.start_train(
max_sampler_processes_per_worker=1,
save_ckpt_after_every_pipeline_stage=False,
)
ckpt_dir = runner.checkpoint_dir(
start_time_str=train_start_time_str, create_if_none=False
)
log_info(
f"Running testing with (view, expert view, seed, lr) ="
f" ({view_radius}, {expert_view_radius}, {seed}, {cfg.exp_params.LR:.3g})."
)
runner.mode = "test"
test_results = runner.start_test(
checkpoint_path_dir_or_pattern=ckpt_dir,
max_sampler_processes_per_worker=1,
)
runner.close()
# Remove the checkpoint file saved above as we no longer need it.
assert len(glob.glob(os.path.join(ckpt_dir, "*"))) == len(
glob.glob(os.path.join(ckpt_dir, "*.pt"))
)
shutil.rmtree(ckpt_dir)
log_info(
f"Testing complete for (view, expert view, seed, lr) ="
f" ({view_radius}, {expert_view_radius}, {seed}, {cfg.exp_params.LR:.3g})."
)
# Put results from test evaluation into the output queue to be
# collated by the main thread.
output_queue.put(
(
(view_radius, expert_view_radius, seed, lr),
{
"view_radius": int(view_radius),
"expert_view_radius": None
if expert_view_radius is None
else int(expert_view_radius),
"optimal": optimal_ave_ep_length,
"reached_near_optimal": 1
* (test_results[0]["ep_length"] < optimal_ave_ep_length * 1.1),
"avg_ep_length": float(test_results[0]["ep_length"]),
"train_steps": int(test_results[0]["training_steps"]),
"seed": seed,
"start_time_str": train_start_time_str,
"lr": lr,
},
)
)
except queue.Empty:
log_info("Queue empty for worker {}, exiting.".format(process_id))
if __name__ == "__main__":
"""Controls the master process that: (1) Instantiates several subprocesses
which run the experiments. (2) Collates the results from the experiments
run in the subprocesses.
Get command line arguments that define the experiment. For instance, we might run
this script (from within the `advisor` directory), with arguments
```
python projects/advisor/lighthouse_scripts/save_pairwise_imitation_data.py \
projects/advisor/lighthouse_experiments/dagger_then_ppo.py \
-m 1 \
--output_dir hp_runs/lighthouse \
--log_level error
```
And this will exhaustively train using the `dagger_then_ppo` experiment
with various agent/expert view radii.
Generate all commands as follows:
```python
import glob
paths = [p for p in glob.glob("lighthouse_experiments/*.py") if "__init__" not in p and "base.py" not in p]
s = "python lighthouse_scripts/save_pairwise_imitation_data.py {} -m 1 --output_dir hp_runs/lighthouse --log_level error"
cmd = " ; ".join([s.format(p) for p in paths])
print(cmd)
```
"""
# Get command line arguments
args = get_args()
# Define fixed parameters
world_dim = 2
world_radius = 15
view_radii = list(range(1, 16, 2))
use_experts = args.experiment.split(".")[-1] not in ["a2c", "ppo"]
nrepeats = 25 if use_experts else 50 # Number of random seeds per experiment
if torch.cuda.is_available():
gpu_memory = torch.cuda.get_device_properties(0).total_memory
max_processes_for_gpus = torch.cuda.device_count() * math.floor(
gpu_memory / (2000 * (2 ** 20))
)
else:
max_processes_for_gpus = 0
nprocesses = (
min(max_processes_for_gpus, math.floor(0.9 * mp.cpu_count()))
if torch.cuda.is_available()
else 1
)
gpu_ids = (
[] if not torch.cuda.is_available() else list(range(torch.cuda.device_count()))
)
ptitle("Master (pairwise)")
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
# Where to save data
tsv_save_data_path = os.path.join(
output_dir,
"{}__{}_{}.tsv".format(
args.experiment.replace(".py", "").replace(".", "_"),
world_dim,
world_radius,
),
)
# Get any experiment data already saved (e.g. from previous runs)
if os.path.exists(tsv_save_data_path):
df = pd.read_csv(tsv_save_data_path, sep="\t")
else:
df = pd.DataFrame(
dict(
view_radius=[],
expert_view_radius=[],
reached_near_optimal=[],
avg_ep_length=[],
train_steps=[],
seed=[],
start_time_str=[],
)
)
# The experiments we've already run
seen_tuples = set(
zip(
df["view_radius"],
[None if np.isnan(x) else x for x in df["expert_view_radius"]],
df["seed"],
)
)
# Add experiments details into the `input_queue` but
# don't include experiments we've already run.
input_queue = mp.Queue()
total_runs = 0
for i, view_radius in enumerate(view_radii):
for expert_view_radius in view_radii[i:] if use_experts else [None]:
for seed in range(nrepeats):
total_runs += 1
t = (view_radius, expert_view_radius, seed)
if t not in seen_tuples:
input_queue.put(t + (None,))
output_queue = mp.Queue()
# Create the subprocesses that run experiments.
processes = []
for i in range(min(nprocesses, total_runs - len(seen_tuples))):
processes.append(
mp.Process(
target=iteratively_run_lighthouse_experiments,
kwargs=dict(
process_id=i,
gpu_id=gpu_ids[i % len(gpu_ids)] if len(gpu_ids) != 0 else None,
args=args,
world_dim=world_dim,
world_radius=world_radius,
input_queue=input_queue,
output_queue=output_queue,
log_level=args.log_level,
),
)
)
processes[-1].start()
time.sleep(0.1)
# Save experimental results from the subprocesses into a tsv file.
os.makedirs(os.path.dirname(tsv_save_data_path), exist_ok=True)
while len(seen_tuples) != total_runs:
new_seen_tuple, run_data = output_queue.get()
seen_tuples.add(new_seen_tuple[:-1]) # Don't include the learning rate
df = df.append(run_data, ignore_index=True)
df.to_csv(tsv_save_data_path, sep="\t", index=False)
for p in processes:
try:
p.join(1)
except Exception as _:
pass
print("Saving pairwise imitation data is done!")
| advisor-main | lighthouse_scripts/save_pairwise_imitation_data.py |
import copy
import glob
import os
import sys
from collections import defaultdict
from typing import Dict, Optional, Tuple, Union, Sequence
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
from advisor_constants import ADVISOR_TOP_LEVEL_DIR
from allenact.utils.misc_utils import TABLEAU10_RGB
plt.rc("font", **{"family": "serif", "serif": ["CMU"]})
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{amsmath}")
def boxplot(y, points, color, delta, lw, include_outliers=False):
lq, median, uq = np.percentile(points, [25, 50, 75])
iqr = uq - lq
outliers = points[np.abs(points - median) > 1.5 * iqr]
inliers = points[np.abs(points - median) <= 1.5 * iqr]
min, max = np.percentile(inliers, [0, 100])
delta = delta / 2
plt.hlines(
y, xmin=lq, xmax=uq, linewidth=lw, color=np.concatenate((color, [0.5]), axis=0)
)
plt.hlines(y, xmin=min, xmax=max, linewidth=lw / 4, color=color)
plt.vlines(median, ymin=y - delta, ymax=y + delta, linewidth=lw / 2, color=color)
if include_outliers and outliers.shape[0] != 0:
plt.scatter(outliers, [y] * len(outliers), s=1, color=color)
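# Note: unlike a textbook Tukey boxplot (whiskers at 1.5 * IQR beyond the
# quartiles), this variant flags points more than 1.5 * IQR from the *median*
# as outliers and draws the thin whisker line at the min/max of the inliers.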
def set_size(w, h, ax=None):
"""Set figure axis sizes.
Taken from the answer in
https://stackoverflow.com/questions/44970010/axes-class-set-explicitly-size-width-height-of-axes-in-given-units
w, h: width, height in inches
"""
if not ax:
ax = plt.gca()
l = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
figw = float(w) / (r - l)
figh = float(h) / (t - b)
ax.figure.set_size_inches(figw, figh)
def plot_boxplots(
points_tensor: Sequence[Sequence[Sequence[float]]],
optimal: Optional[np.ndarray] = None,
title="",
xlabel="",
ylabel=r"View Radius ($i$)",
yticklabels="default",
save_path: Optional[str] = None,
xlim: Optional[Union[Tuple[float, float], Tuple[str, float, float]]] = None,
lw=2,
colors: Optional[Sequence[Tuple[int, int, int]]] = None,
fig_size=(4, 4 * 3.0 / 5.0),
hline_after: Optional[int] = None,
include_outliers: bool = False,
):
nrows, ncols = len(points_tensor), len(points_tensor[0])
assert all(len(row) == ncols for row in points_tensor)
nboxes = np.logical_not(
np.isnan([points_tensor[i][j][0] for i in range(nrows) for j in range(ncols)])
).sum()
many_sub_boxes = nboxes > len(points_tensor)
if many_sub_boxes:
nboxes += len(points_tensor)
yvalues = list(np.linspace(0, 1, num=nboxes + (hline_after is not None)))
yticks = []
yminorticks = []
default_yticklabels = []
plt.grid(
b=True,
axis="y",
which="major",
color=np.array([0.9, 0.9, 0.9]),
linestyle="-",
zorder=-2,
)
plt.minorticks_on()
plt.grid(
b=True,
axis="y",
which="minor",
color=np.array([0.9, 0.9, 0.9]),
linestyle="-",
zorder=-2,
)
ax = plt.gca()
ax.set_axisbelow(True)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if hline_after is not None:
plt.axhline(
y=(yvalues[-(hline_after + 1)]), linewidth=lw / 8, color="black",
)
yvalues.pop(-(hline_after + 1))
if ncols == 1:
for y in yvalues:
plt.axhline(y=y, linewidth=lw / 16, color="lightgrey", zorder=-1)
for i in range(nrows):
ys = []
if many_sub_boxes and i != 0:
yvalues.pop()
for j in range(ncols):
if not np.isnan(points_tensor[i][j][0]):
ys.append(yvalues.pop())
try:
boxplot(
# nrows - i + y_offsets.pop(),
ys[-1],
np.array(points_tensor[i][j]),
color=np.array(
TABLEAU10_RGB[i % 10] if colors is None else colors[i]
)
/ 255,
delta=1 / (nboxes - 1),
lw=lw,
include_outliers=include_outliers,
)
except Exception as _:
pass
if len(ys) != 0:
yticks.append(np.max(ys))
yminorticks.extend(ys)
default_yticklabels.append(i + 1)
if optimal is not None and len(ys) != 0:
plt.vlines(
x=optimal[i],
ymin=min(ys) - 1 / (2 * (nboxes - 1)),
ymax=max(ys) + 1 / (2 * (nboxes - 1)),
colors="grey",
linewidth=0.5,
)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel=ylabel)
if yticklabels == "default":
plt.yticks(yticks, labels=default_yticklabels)
ax = plt.gca()
ax.tick_params("y", which="major", direction="inout")
ax.tick_params("y", which="minor", direction="in")
ax.set_yticks(yminorticks, minor=True)
else:
plt.yticks(yticks, labels=list(yticklabels))
plt.tight_layout()
if xlim is not None:
if xlim[0] != "proportional":
plt.xlim(xmin=xlim[0], xmax=xlim[1])
else:
plt.tight_layout()
_, right = plt.xlim()
_, xmin, xmax = xlim
right = max(xmin, min(xmax, right))
plt.xlim(xmin=0, xmax=right)
fig_size = (fig_size[0] * (1 - (xmax - right) / xmax), fig_size[1])
set_size(*fig_size)
if save_path is None:
plt.show()
else:
plt.savefig(
save_path, bbox_inches="tight",
)
plt.close()
print(f"Figure saved to {save_path}")
def plot_means_and_CIs(
means_mat: np.ndarray,
ci_lows_mat: np.ndarray,
ci_highs_mat: np.ndarray,
optimal: np.ndarray,
title="",
xlabel="",
ylabel=r"View Radius ($i$)",
yticklabels="default",
save_path: Optional[str] = None,
xlim: Optional[Tuple[float, float]] = None,
):
# fig = plt.figure(figsize=tuple(2 * np.array((5, 3))))
results_per_row = []
nrows, ncols = means_mat.shape[0], means_mat.shape[1]
for i in range(means_mat.shape[0]):
results_for_row = []
for j in range(means_mat.shape[1]):
if not np.isnan(means_mat[i, j]):
results_for_row.append(
(means_mat[i, j], ci_lows_mat[i, j], ci_highs_mat[i, j])
)
results_per_row.append(results_for_row)
for i in range(len(results_per_row)):
if optimal is not None:
plt.vlines(
x=optimal[i],
ymin=nrows - i - 0.5,
ymax=nrows - i + 0.5,
linestyles="dashed",
colors="grey",
)
means = [t[0] for t in results_per_row[i]]
ci_lows = [t[1] for t in results_per_row[i]]
ci_highs = [t[2] for t in results_per_row[i]]
nmeans = len(means)
y_offsets = -(
np.linspace(0.0 + 1 / (nmeans + 1), 1.0, num=nmeans, endpoint=False) - 0.5
)
plt.scatter(means, nrows - i + y_offsets)
plt.hlines(nrows - i + y_offsets, xmin=ci_lows, xmax=ci_highs)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel=ylabel)
if yticklabels == "default":
plt.yticks(range(1, nrows + 1), labels=nrows - np.array(range(nrows)))
else:
plt.yticks(range(1, nrows + 1), labels=list(reversed(yticklabels)))
plt.tight_layout()
if xlim is not None:
plt.xlim(xmin=xlim[0], xmax=xlim[1])
if save_path is None:
plt.show()
else:
plt.savefig(
save_path, bbox_inches="tight",
)
plt.close()
print(f"Figure saved to {save_path}")
if __name__ == "__main__":
lighthouse_save_dir = os.path.join(ADVISOR_TOP_LEVEL_DIR, "hp_runs", "lighthouse")
tsv_dir = os.path.join(lighthouse_save_dir, "pairwise_imitation_results")
if len(sys.argv) > 1:
file_paths = [os.path.join(tsv_dir, "{}__2_15.tsv".format(sys.argv[1]))]
else:
file_paths = glob.glob(os.path.join(tsv_dir, "*.tsv"))
for file_path in file_paths:
df = pd.read_csv(file_path, sep="\t")
df["early_exit"] = 1.0 * (df["train_steps"] < 300000)
bernoulli_keys = [
key
for key in df.keys()
if np.all(np.logical_or(df[key] == 1.0, df[key] == 0.0))
]
nan_expert_view_radii = np.isnan(df["expert_view_radius"])
assert (not nan_expert_view_radii.any()) or nan_expert_view_radii.all()
if nan_expert_view_radii.any():
df["expert_view_radius"] = 1
df_grouped_mean = df.groupby(by=["view_radius", "expert_view_radius"]).mean()
df_grouped_count = df.groupby(by=["view_radius", "expert_view_radius"]).count()
df_grouped_std = df.groupby(
by=["view_radius", "expert_view_radius"]
).std() / np.sqrt(df_grouped_count)
results_shape = (
int(np.max(df["view_radius"])),
int(np.max(df["expert_view_radius"])),
)
key_to_means: Dict[str, np.ndarray] = defaultdict(
lambda: np.full(results_shape, fill_value=float("nan"),)
)
key_to_stds = copy.deepcopy(key_to_means)
key_to_counts = copy.deepcopy(key_to_means)
key_to_point_tensors: Dict[str, np.ndarray] = defaultdict(
lambda: np.full(
(*results_shape, df_grouped_count.max()["avg_ep_length"],),
fill_value=float("nan"),
)
)
for view_radius, expert_view_radius in list(df_grouped_mean.index):
means = df_grouped_mean.loc[(view_radius, expert_view_radius)]
stds = df_grouped_std.loc[(view_radius, expert_view_radius)]
counts = df_grouped_count.loc[(view_radius, expert_view_radius)]
for key in means.keys():
if key == "seed":
continue
key_to_means[key][
int(view_radius) - 1, int(expert_view_radius) - 1
] = means[key]
key_to_stds[key][
int(view_radius) - 1, int(expert_view_radius) - 1
] = stds[key]
key_to_counts[key][
int(view_radius) - 1, int(expert_view_radius) - 1
] = counts[key]
points = np.array(
df.query(
"view_radius=={} and expert_view_radius=={}".format(
view_radius, expert_view_radius
)
)[key]
)
key_to_point_tensors[key][
int(view_radius) - 1, int(expert_view_radius) - 1, : points.shape[0]
] = points
key_to_ci_low = {}
key_to_ci_high = {}
for key in key_to_means:
if key == "seed":
continue
means = key_to_means[key]
stds = key_to_stds[key]
counts = key_to_counts[key]
key_to_ci_low[key] = np.zeros(means.shape)
key_to_ci_high[key] = np.zeros(means.shape)
low = key_to_ci_low[key]
high = key_to_ci_high[key]
for i in range(means.shape[0]):
for j in range(means.shape[1]):
mean = means[i, j]
count = counts[i, j]
std = stds[i, j]
if not np.isnan(mean):
if key in bernoulli_keys:
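                            # 0/1 (Bernoulli) metrics get a Jeffreys binomial
                            # interval rather than a normal approximation.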
low[i, j], high[i, j] = proportion_confint(
count=mean * count,
nobs=count,
alpha=0.05,
method="jeffreys",
)
else:
low[i, j], high[i, j] = mean + 1.96 * std * np.array(
[-1, 1]
)
else:
low[i, j] = np.nan
high[i, j] = np.nan
save_plot_dir = "pairwise_plots_" + "_".join(
file_path.replace(".tsv", "").split("_")[-2:]
)
for key in [
"avg_ep_length",
]:
save_dir = os.path.join(lighthouse_save_dir, "metric_comparisons")
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(
save_dir,
"{}__{}.pdf".format(
key, os.path.split(file_path)[-1].replace(".tsv", "")
),
)
optimal = (
np.nanmin(key_to_means["optimal"], axis=1)
if key == "avg_ep_length"
else None
)
if key == "train_steps":
xlim = (0, None)
elif key == "avg_ep_length":
xlim = ("proportional", 110, 850)
else:
xlim = None
if key in bernoulli_keys:
plot_means_and_CIs(
means_mat=key_to_means[key],
ci_lows_mat=key_to_ci_low[key],
ci_highs_mat=key_to_ci_high[key],
optimal=optimal,
xlabel=key.replace("_", " ").title(),
save_path=save_path,
xlim=xlim,
)
else:
fig_size = (3, 3 * 3.0 / 5.0)
# THINNING
if key_to_point_tensors[key].shape[0] == 15:
for i in [13, 15]: # [3,7,11,15]:
key_to_point_tensors[key][i - 1, :, :] = np.nan
plot_boxplots(
points_tensor=key_to_point_tensors[key],
optimal=optimal,
xlabel=key.replace("_", " ").title(),
save_path=save_path,
xlim=xlim,
fig_size=fig_size,
)
| advisor-main | lighthouse_scripts/summarize_pairwise_imitation_data.py |
advisor-main | lighthouse_scripts/__init__.py |
|
import glob
import json
import os
import traceback
import warnings
from collections import defaultdict
from typing import Dict, Tuple, List
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.framework.errors_impl import DataLossError
from tensorflow.python.summary.summary_iterator import summary_iterator
from allenact.utils.misc_utils import TABLEAU10_RGB
_color_iter = iter(TABLEAU10_RGB)
HEAD_NAME_TO_COLOR: Dict[str, Tuple[int, int, int]] = defaultdict(
lambda: next(_color_iter)
)
VISUALIZATION = True
GENERATE_JSONS = True
OVERWRITE = True
if __name__ == "__main__":
# TODO: Allow for changing `experiment_tag` via command line arguments.
# experiment_tag = "advisor_ppo"
experiment_tag = "dagger_then_ppo"
cur_wd = os.getcwd()
assert os.path.basename(cur_wd) == "advisor"
saved_data_base_dir = "lighthouse_pairwise_training"
log_dir = os.path.join(cur_wd, saved_data_base_dir, "tb")
saved_processed_output_dir = os.path.join(
cur_wd, saved_data_base_dir, "logs_processed_to_jsons"
)
os.makedirs(saved_processed_output_dir, exist_ok=True)
if GENERATE_JSONS:
tag_to_train_or_valid_and_nice_key = {"train/ep_length": ("train", "ep_length")}
tsv_file_path = os.path.join(
saved_data_base_dir, "{}__2_10.tsv".format(experiment_tag)
)
df = pd.read_csv(tsv_file_path, sep="\t")
view_radii = set(int(x) if x is not None else x for x in df["view_radius"])
expert_view_radii = set(
int(x) if x is not None else x for x in df["expert_view_radius"]
)
for view_radius in view_radii:
for expert_view_radius in expert_view_radii:
log_save_path = os.path.join(
saved_processed_output_dir,
"{}__{}_{}__info.json".format(
experiment_tag, view_radius, expert_view_radius
),
)
if not OVERWRITE and os.path.exists(log_save_path):
print(
"{} already exists and we're not overwritting, skipping...".format(
os.path.basename(log_save_path)
)
)
continue
subdf = df.query(
"view_radius == {} & expert_view_radius == {}".format(
view_radius, expert_view_radius
)
)
ids = list(subdf["start_time_str"])
event_paths = [
p
for id in ids
for p in glob.glob(os.path.join(log_dir, "*", id, "events.out*"))
]
if len(event_paths) == 0:
continue
save_data_per_event_train: Dict[
str, List[List[Tuple[int, float]]]
] = defaultdict(lambda: [])
for event_path in event_paths:
save_data_per_metric_train: Dict[
str, List[Tuple[int, float]]
] = defaultdict(lambda: [])
try:
for summary in summary_iterator(event_path):
try:
step = summary.step
tag_and_value = summary.summary.value[0]
metric_id = tag_and_value.tag
if metric_id not in tag_to_train_or_valid_and_nice_key:
continue
value = tag_and_value.simple_value
(
train_or_val,
nice_tag,
) = tag_to_train_or_valid_and_nice_key[metric_id]
assert train_or_val == "train"
save_data_per_metric_train[nice_tag].append(
(step, value)
)
except Exception as _:
pass
except DataLossError as _:
warnings.warn("Data loss error in {}".format(event_path))
for k, v in save_data_per_metric_train.items():
save_data_per_event_train[k].append(v)
with open(log_save_path, "w",) as f:
json.dump(save_data_per_event_train, f)
if VISUALIZATION:
for file_path in glob.glob(os.path.join(saved_processed_output_dir, "*.json")):
plot_save_dir = os.path.join(cur_wd, "pairwise_plots", "train_curves")
os.makedirs(plot_save_dir, exist_ok=True)
save_plot_path = os.path.join(
plot_save_dir,
"{}.pdf".format(
"__".join(os.path.basename(file_path).split("__")[:-1])
),
)
if not OVERWRITE and os.path.exists(save_plot_path):
print(
"{} already exists and we're not overwritting, skipping...".format(
os.path.basename(save_plot_path)
)
)
continue
figsize = (4, 3)
overwrite = False
with open(file_path, "r") as f:
metric_values = json.load(f)
plt.figure(figsize=figsize)
try:
for step_and_ep_length_list in metric_values["ep_length"]:
x = [step for step, _ in step_and_ep_length_list]
y = [ep_length for _, ep_length in step_and_ep_length_list]
plt.plot(
x, y, color=(0.0, 0.0, 1.0, 0.2),
)
plt.xlim(9e3, 4e5)
plt.xlabel("Train steps")
plt.ylabel("Episode Length")
plt.xscale("log")
plt.savefig(save_plot_path, bbox_inches="tight")
except Exception as e:
traceback.print_tb(e.__traceback__)
print("Continuing")
finally:
plt.close()
| advisor-main | lighthouse_scripts/summarize_pairwise_imitation_train_curves.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseBC(BaseLightHouseExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
Training with Imitation.
"""
def tag(self):
return "LightHouseBC"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
loss_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
)
| advisor-main | lighthouse_experiments/bc.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseBCThenPPO(BaseLightHouseExperimentConfig):
"""Dagger then ppo."""
def tag(self):
return "LightHouseBCThenPPO"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
steps_per_pipeline_stage = training_steps // 2
ppo_info = self.rl_loss_default("ppo", steps=steps_per_pipeline_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=steps_per_pipeline_stage,
),
PipelineStage(
loss_names=["ppo_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=steps_per_pipeline_stage,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | lighthouse_experiments/bc_then_ppo.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseDagger(BaseLightHouseExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
Training with Dagger.
"""
def tag(self):
return "LightHouseDagger"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
loss_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=training_steps // 2,
),
max_stage_steps=training_steps // 2,
),
PipelineStage(
loss_names=["imitation_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps // 2,
),
],
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
)
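# Note: DAgger is realized here as behaviour cloning whose teacher-forcing
# probability anneals linearly from 1 to 0 over the first half of training;
# the second stage then continues the imitation loss on purely on-policy rollouts.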
| advisor-main | lighthouse_experiments/dagger.py |
advisor-main | lighthouse_experiments/__init__.py |
|
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseBCTeacherForcing(BaseLightHouseExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
    Training with BC under full teacher forcing (forcing probability fixed at 1).
"""
def tag(self):
return "LightHouseBCTeacherForcing__lr_{}".format(self.lr(),)
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
loss_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=training_steps,
),
max_stage_steps=training_steps,
),
],
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
)
| advisor-main | lighthouse_experiments/bc_teacher_forcing.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseImitationAndPPO(BaseLightHouseExperimentConfig):
"""Dagger then ppo."""
def tag(self):
return "LightHouseImitationAndPPO"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
steps_per_pipeline_stage = training_steps // 2
ppo_info = self.rl_loss_default("ppo", steps=steps_per_pipeline_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=steps_per_pipeline_stage,
),
max_stage_steps=steps_per_pipeline_stage,
),
PipelineStage(
loss_names=["ppo_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=steps_per_pipeline_stage,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | lighthouse_experiments/dagger_then_ppo.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseBCTeacherForcingThenPPO(BaseLightHouseExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
Training with behavior cloning (teacher forcing fixed at 1) followed by PPO.
"""
def tag(self):
return "LightHouseBCTeacherForcingThenPPO"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
steps_per_pipeline_stage = training_steps // 2
ppo_info = self.rl_loss_default("ppo", steps=steps_per_pipeline_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=steps_per_pipeline_stage,
),
max_stage_steps=steps_per_pipeline_stage,
),
PipelineStage(
loss_names=["ppo_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=steps_per_pipeline_stage,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | lighthouse_experiments/bc_teacher_forcing_then_ppo.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHousePPO(BaseLightHouseExperimentConfig):
"""PPO only."""
def tag(self):
return "LightHousePPO"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
return self._training_pipeline(
named_losses={"ppo_loss": ppo_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
)
| advisor-main | lighthouse_experiments/ppo.py |
from torch import nn
from advisor_losses import AdvisorWeightedStage
from allenact.base_abstractions.sensor import SensorSuite
from allenact.embodiedai.models.basic_models import RNNActorCritic
from allenact.utils.experiment_utils import Builder, PipelineStage
from allenact_plugins.lighthouse_plugin.lighthouse_models import (
LinearAdvisorActorCritic,
)
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseAdvisorPPO(BaseLightHouseExperimentConfig):
"""PPO and Imitation with adaptive reweighting."""
def tag(self):
return "LightHouseAdvisorPPO"
def training_pipeline(self, **kwargs):
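# alpha is the fixed coefficient trading off the auxiliary imitation term
# against the PPO term in the ADVISOR-weighted loss.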
alpha = 20
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
return self._training_pipeline(
named_losses={
"advisor_loss": AdvisorWeightedStage(
rl_loss=ppo_info["loss"], fixed_alpha=alpha, fixed_bound=0
)
},
pipeline_stages=[
PipelineStage(
loss_names=["advisor_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
)
def create_model(self, **kwargs) -> nn.Module:
sensors = self.get_sensors()
if self.exp_params.RECURRENT_MODEL:
return RNNActorCritic(
input_uuid=sensors[0].uuid,
action_space=self._action_space(),
observation_space=SensorSuite(sensors).observation_spaces,
rnn_type="LSTM",
head_type=Builder( # type: ignore
LinearAdvisorActorCritic,
kwargs={"ensure_same_init_aux_weights": False},
),
)
else:
return LinearAdvisorActorCritic(
input_uuid=sensors[0].uuid,
action_space=self._action_space(),
observation_space=SensorSuite(sensors).observation_spaces,
ensure_same_init_aux_weights=False,
)
| advisor-main | lighthouse_experiments/advisor_ppo.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseA2C(BaseLightHouseExperimentConfig):
"""A2C only."""
def tag(self):
return "LightHouseA2C"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
ppo_info = self.rl_loss_default("a2c", steps=training_steps)
return self._training_pipeline(
named_losses={"a2c_loss": ppo_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["a2c_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=a2c_info["num_mini_batch"],
update_repeats=a2c_info["update_repeats"],
)
| advisor-main | lighthouse_experiments/a2c.py |
from advisor_losses import AdvisorWeightedStage
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.advisor_ppo import LightHouseAdvisorPPO
class LightHouseAdvisorA2C(LightHouseAdvisorPPO):
"""A2C and Imitation with adaptive reweighting."""
def tag(self):
return "LightHouseAdvisorA2C"
def training_pipeline(self, **kwargs):
alpha = 20
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
a2c_info = self.rl_loss_default("a2c", steps=training_steps)
return self._training_pipeline(
named_losses={
"advisor_loss": AdvisorWeightedStage(
rl_loss=a2c_info["loss"], fixed_alpha=alpha, fixed_bound=0
),
},
pipeline_stages=[
PipelineStage(
loss_names=["advisor_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=a2c_info["num_mini_batch"],
update_repeats=a2c_info["update_repeats"],
)
| advisor-main | lighthouse_experiments/advisor_a2c.py |
import math
from abc import ABC
from typing import Dict, Any, List, Optional, Tuple, Union, NamedTuple
import gym
import torch
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import (
SensorSuite,
Sensor,
ExpertActionSensor,
)
from allenact.base_abstractions.task import TaskSampler
from allenact.embodiedai.models.basic_models import LinearActorCritic, RNNActorCritic
from allenact.utils.experiment_utils import (
Builder,
LinearDecay,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.lighthouse_plugin.lighthouse_environment import (
LightHouseEnvironment,
)
from allenact_plugins.lighthouse_plugin.lighthouse_sensors import (
FactorialDesignCornerSensor,
)
from allenact_plugins.lighthouse_plugin.lighthouse_tasks import (
FindGoalLightHouseTaskSampler,
)
from allenact_plugins.lighthouse_plugin.lighthouse_util import StopIfNearOptimal
class LighthouseExperimentParams(NamedTuple):
WORLD_DIM: int = 2
VIEW_RADIUS: int = 1
EXPERT_VIEW_RADIUS: int = 15
WORLD_RADIUS: int = 15
DEGREE: int = -1
MAX_STEPS: int = 1000
GPU_ID: Optional[int] = None
NUM_TRAIN_SAMPLERS: int = 20 if torch.cuda.is_available() else 2
NUM_TEST_TASKS: int = 200
RECURRENT_MODEL: bool = False
TOTAL_TRAIN_STEPS: int = int(3e5)
SHOULD_LOG: bool = not torch.cuda.is_available()
TEST_SEED_OFFSET: int = 0
CKPTS_TO_SAVE: int = 1
# `LR` chosen by optimizing the performance of imitation learning
LR: float = 0.0242
class BaseLightHouseExperimentConfig(ExperimentConfig, ABC):
"""Base experimental config."""
_SENSOR_CACHE: Dict[Tuple[int, int, Optional[int], int], List[Sensor]] = {}
def __init__(self, **kwargs):
self.exp_params = LighthouseExperimentParams(**kwargs)
def lr(self):
return self.exp_params.LR
def _action_space(self):
return gym.spaces.Discrete(2 * self.exp_params.WORLD_DIM)
def get_sensors(self):
key = (
self.exp_params.VIEW_RADIUS,
self.exp_params.WORLD_DIM,
(None if self.exp_params.RECURRENT_MODEL else self.exp_params.DEGREE),
self.exp_params.EXPERT_VIEW_RADIUS,
)
assert (not self.exp_params.RECURRENT_MODEL) or self.exp_params.DEGREE == 1
if key not in self._SENSOR_CACHE:
sensors = [
FactorialDesignCornerSensor(
view_radius=self.exp_params.VIEW_RADIUS,
world_dim=self.exp_params.WORLD_DIM,
degree=self.exp_params.DEGREE,
)
]
if self.exp_params.EXPERT_VIEW_RADIUS:
sensors.append(
ExpertActionSensor(
expert_args={
"expert_view_radius": self.exp_params.EXPERT_VIEW_RADIUS,
"deterministic": True,
},
action_space=self._action_space(),
)
)
self._SENSOR_CACHE[key] = sensors
return self._SENSOR_CACHE[key]
def optimal_ave_ep_length(self):
return LightHouseEnvironment.optimal_ave_ep_length(
world_dim=self.exp_params.WORLD_DIM,
world_radius=self.exp_params.WORLD_RADIUS,
view_radius=self.exp_params.VIEW_RADIUS,
)
def get_early_stopping_criterion(self):
optimal_ave_ep_length = self.optimal_ave_ep_length()
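# Stop a stage early once the recent average episode length falls within
# 5% of the optimal expected episode length for this world configuration.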
return StopIfNearOptimal(
optimal=optimal_ave_ep_length,
deviation=optimal_ave_ep_length * 0.05,
min_memory_size=50,
)
def rl_loss_default(self, alg: str, steps: Optional[int] = None):
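# Default loss plus (num_mini_batch, update_repeats) settings per
# algorithm; for PPO, the clipping parameter decays linearly over `steps`.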
if alg == "ppo":
assert steps is not None
return {
"loss": (PPO(clip_decay=LinearDecay(steps), **PPOConfig)),
"num_mini_batch": 2,
"update_repeats": 4,
}
elif alg == "a2c":
return {
"loss": A2C(**A2CConfig),
"num_mini_batch": 1,
"update_repeats": 1,
}
elif alg == "imitation":
return {
"loss": Imitation(),
"num_mini_batch": 2,
"update_repeats": 4,
}
else:
raise NotImplementedError
def _training_pipeline(
self,
named_losses: Dict[str, Union[Loss, Builder]],
pipeline_stages: List[PipelineStage],
num_mini_batch: int,
update_repeats: int,
lr: Optional[float] = None,
):
# When using many mini-batches or update repeats, decrease the learning
# rate so that the approximate size of the gradient update is similar.
lr = self.exp_params.LR if lr is None else lr
num_steps = 100
metric_accumulate_interval = (
self.exp_params.MAX_STEPS * 10
) # Log every 10 max length tasks
gamma = 0.99
if self.exp_params.CKPTS_TO_SAVE == 0:
save_interval = None
else:
save_interval = math.ceil(
sum(ps.max_stage_steps for ps in pipeline_stages)
/ self.exp_params.CKPTS_TO_SAVE
)
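# GAE is disabled only for REINFORCE, which expects plain discounted returns.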
use_gae = "reinforce_loss" not in named_losses
gae_lambda = 1.0
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses=named_losses,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=None,
should_log=self.exp_params.SHOULD_LOG,
pipeline_stages=pipeline_stages,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=self.exp_params.TOTAL_TRAIN_STEPS)} # type: ignore
),
)
def machine_params(
self, mode="train", gpu_id="default", n_train_processes="default", **kwargs
):
if mode == "train":
if n_train_processes == "default":
nprocesses = self.exp_params.NUM_TRAIN_SAMPLERS
else:
nprocesses = n_train_processes
elif mode == "valid":
nprocesses = 0
elif mode == "test":
nprocesses = min(
self.exp_params.NUM_TEST_TASKS, 500 if torch.cuda.is_available() else 50
)
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
if gpu_id == "default":
gpu_ids = [] if self.exp_params.GPU_ID is None else [self.exp_params.GPU_ID]
else:
gpu_ids = [gpu_id]
return MachineParams(nprocesses=nprocesses, devices=gpu_ids)
def create_model(self, **kwargs) -> nn.Module:
sensors = self.get_sensors()
if self.exp_params.RECURRENT_MODEL:
return RNNActorCritic(
input_uuid=sensors[0].uuid,
action_space=self._action_space(),
observation_space=SensorSuite(sensors).observation_spaces,
rnn_type="LSTM",
)
else:
return LinearActorCritic(
input_uuid=sensors[0].uuid,
action_space=self._action_space(),
observation_space=SensorSuite(sensors).observation_spaces,
)
def make_sampler_fn(self, **kwargs) -> TaskSampler:
return FindGoalLightHouseTaskSampler(**kwargs)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return {
"world_dim": self.exp_params.WORLD_DIM,
"world_radius": self.exp_params.WORLD_RADIUS,
"max_steps": self.exp_params.MAX_STEPS,
"sensors": self.get_sensors(),
"action_space": self._action_space(),
"seed": seeds[process_ind] if seeds is not None else None,
}
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
raise RuntimeError
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
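# Distribute NUM_TEST_TASKS as evenly as possible: the first
# NUM_TEST_TASKS % total_processes processes each take one extra task.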
max_tasks = self.exp_params.NUM_TEST_TASKS // total_processes + (
process_ind < (self.exp_params.NUM_TEST_TASKS % total_processes)
)
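# Deterministic, per-process disjoint seeds so test episodes are
# reproducible and never duplicated across processes.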
task_seeds_list = [
2 ** 31
- 1
+ self.exp_params.TEST_SEED_OFFSET
+ process_ind
+ total_processes * i
for i in range(max_tasks)
]
assert min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
train_sampler_args = self.train_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
return {
**train_sampler_args,
"task_seeds_list": task_seeds_list,
"max_tasks": max_tasks,
"deterministic_sampling": True,
}
| advisor-main | lighthouse_experiments/base.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.lighthouse_experiments.base import BaseLightHouseExperimentConfig
class LightHouseBCAndPPO(BaseLightHouseExperimentConfig):
"""PPO and Imitation jointly."""
def tag(self):
return "LightHouseBCAndPPO"
def training_pipeline(self, **kwargs):
training_steps = self.exp_params.TOTAL_TRAIN_STEPS
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss", "ppo_loss"],
early_stopping_criterion=self.get_early_stopping_criterion(),
max_stage_steps=training_steps,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | lighthouse_experiments/bc_and_ppo.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBC(BaseExperimentConfig):
"""Training with behavior cloning."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "BC__lr_{}".format(self.exp_params.LR)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
loss_info = self.rl_loss_default("imitation")
return self._training_pipeline(
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
)
| advisor-main | minigrid_and_pd_experiments/bc.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBCThenPPO(BaseExperimentConfig):
"""Training with behavior cloning and then PPO."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "BCThenPPO__bc_{}__lr_{}".format(
self.exp_params.TF_RATIO, self.exp_params.LR
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_imitation_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_ppo_stage = training_steps - steps_imitation_stage
ppo_info = self.rl_loss_default("ppo", steps=steps_ppo_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=steps_imitation_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=steps_ppo_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | minigrid_and_pd_experiments/bc_then_ppo.py |
from advisor_losses import (
AdvisorImitationStage,
AdvisorWeightedStage,
)
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBCTeacherForcingThenAdvisor(BaseExperimentConfig):
"""Training with behavior cloning (teacher forcing of 1) followed by
adaptive reweighing."""
def __init__(self, task_name: str, **kwargs):
super().__init__(
task_name=task_name,
USE_EXPERT=True,
SAME_INIT_VALS_FOR_ADVISOR_HEAD=False,
INCLUDE_AUXILIARY_HEAD=True,
**kwargs
)
def extra_tag(self):
return "BCTeacherForcingThenAdvisor__alpha_{}__lr_{}__tf_{}".format(
self.exp_params.FIXED_ALPHA, self.exp_params.LR, self.exp_params.TF_RATIO,
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_advisor_warmup_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_advisor_weighted_stage = training_steps - steps_advisor_warmup_stage
ppo_info = self.rl_loss_default("ppo", steps=steps_advisor_weighted_stage)
fixed_alpha = self.exp_params.FIXED_ALPHA
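# Stage 1: imitation-only ADVISOR warmup under full teacher forcing;
# stage 2: the alpha-weighted ADVISOR combination of PPO and imitation.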
return self._training_pipeline(
named_losses={
"advisor_imitation_warmup": AdvisorImitationStage(),
"advisor_loss": AdvisorWeightedStage(
rl_loss=ppo_info["loss"], fixed_alpha=fixed_alpha, fixed_bound=0,
),
},
pipeline_stages=[
PipelineStage(
loss_names=["advisor_imitation_warmup"],
max_stage_steps=steps_advisor_warmup_stage,
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=steps_advisor_warmup_stage,
),
),
PipelineStage(
loss_names=["advisor_loss"],
max_stage_steps=steps_advisor_weighted_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/bc_teacher_forcing_then_advisor.py |
from allenact.utils.experiment_utils import PipelineStage, OffPolicyPipelineComponent
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
MiniGridOffPolicyExpertCELoss,
)
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
PoisonedDoorsOffPolicyExpertCELoss,
)
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdPureOffPolicy(BaseExperimentConfig):
"""Off policy imitation."""
def extra_tag(self):
return "PureOffPolicyBC__lr_{}".format(self.exp_params.LR)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
offpolicy_demo_info = self.offpolicy_demo_defaults(also_using_ppo=False)
if self.task_name == "PoisonedDoors":
offpolicy_expert_ce_loss = PoisonedDoorsOffPolicyExpertCELoss()
else:
# MiniGrid Tasks
offpolicy_expert_ce_loss = MiniGridOffPolicyExpertCELoss()
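# The on-policy stage lists no losses (and num_mini_batch / update_repeats
# are 0): every gradient step comes from the off-policy expert
# demonstrations via the OffPolicyPipelineComponent.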
return self._training_pipeline(
named_losses={"offpolicy_expert_ce_loss": offpolicy_expert_ce_loss},
pipeline_stages=[
PipelineStage(
loss_names=[],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
offpolicy_component=OffPolicyPipelineComponent(
data_iterator_builder=offpolicy_demo_info[
"data_iterator_builder"
],
loss_names=["offpolicy_expert_ce_loss"],
updates=offpolicy_demo_info["offpolicy_updates"],
),
),
],
num_mini_batch=0,
update_repeats=0,
)
| advisor-main | minigrid_and_pd_experiments/pure_offpolicy.py |
from advisor_losses import AdvisorWeightedStage
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdAdvisor(BaseExperimentConfig):
"""Training with adaptive reweighing."""
def __init__(self, task_name: str, **kwargs):
super().__init__(
task_name=task_name,
USE_EXPERT=True,
SAME_INIT_VALS_FOR_ADVISOR_HEAD=False,
INCLUDE_AUXILIARY_HEAD=True,
**kwargs
)
def extra_tag(self):
return "Advisor__alpha_{}__lr_{}".format(
self.exp_params.FIXED_ALPHA, self.exp_params.LR,
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
alpha = self.exp_params.FIXED_ALPHA
return self._training_pipeline(
named_losses={
"advisor_loss": AdvisorWeightedStage(
rl_loss=ppo_info["loss"], fixed_alpha=alpha, fixed_bound=0
)
},
pipeline_stages=[
PipelineStage(
loss_names=["advisor_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/advisor.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBCWithPPO(BaseExperimentConfig):
"""Training with behavior cloning and PPO jointly."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "BCWithPPO__bc_{}__lr_{}".format(
self.exp_params.TF_RATIO, self.exp_params.LR
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss", "ppo_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
)
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | minigrid_and_pd_experiments/bc_with_ppo.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdDagger(BaseExperimentConfig):
"""Training with DAgger."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "Dagger__tf_{}__lr_{}".format(
self.exp_params.TF_RATIO, self.exp_params.LR
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_tf_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_bc_stage = training_steps - steps_tf_stage
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={"imitation_loss": imitation_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=steps_tf_stage,
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=steps_tf_stage,
),
),
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=steps_bc_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=imitation_info["num_mini_batch"],
update_repeats=imitation_info["update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/dagger.py |
advisor-main | minigrid_and_pd_experiments/__init__.py |
|
from advisor_losses import (
AdvisorImitationStage,
AdvisorWeightedStage,
)
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdDaggerThenAdvisor(BaseExperimentConfig):
"""Training with DAgger followed adaptive reweighing."""
def __init__(self, task_name: str, **kwargs):
super().__init__(
task_name=task_name,
USE_EXPERT=True,
SAME_INIT_VALS_FOR_ADVISOR_HEAD=False,
INCLUDE_AUXILIARY_HEAD=True,
**kwargs
)
def extra_tag(self):
return "DaggerThenAdvisor__alpha_{}__lr_{}__tf_{}".format(
self.exp_params.FIXED_ALPHA, self.exp_params.LR, self.exp_params.TF_RATIO,
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_advisor_warmup_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_advisor_weighted_stage = training_steps - steps_advisor_warmup_stage
ppo_info = self.rl_loss_default("ppo", steps=steps_advisor_weighted_stage)
fixed_alpha = self.exp_params.FIXED_ALPHA
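# Stage 1 anneals teacher forcing 1 -> 0 (DAgger-style warmup); stage 2
# switches to the alpha-weighted ADVISOR loss on top of PPO.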
return self._training_pipeline(
named_losses={
"advisor_imitation_warmup": AdvisorImitationStage(),
"advisor_loss": AdvisorWeightedStage(
rl_loss=ppo_info["loss"], fixed_alpha=fixed_alpha, fixed_bound=0,
),
},
pipeline_stages=[
PipelineStage(
loss_names=["advisor_imitation_warmup"],
max_stage_steps=steps_advisor_warmup_stage,
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=steps_advisor_warmup_stage,
),
),
PipelineStage(
loss_names=["advisor_loss"],
max_stage_steps=steps_advisor_weighted_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/dagger_then_advisor.py |
from advisor_losses import MiniGridOffPolicyAdvisorLoss
from allenact.utils.experiment_utils import PipelineStage, OffPolicyPipelineComponent
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
PoisonedDoorsOffPolicyAdvisorLoss,
)
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdPPOWithOffPolicyAdvisor(BaseExperimentConfig):
"""PPO and Imitation with adaptive reweighting."""
def __init__(self, task_name: str, **kwargs):
super().__init__(
task_name=task_name,
SAME_INIT_VALS_FOR_ADVISOR_HEAD=False,
INCLUDE_AUXILIARY_HEAD=True,
**kwargs
)
def extra_tag(self):
return "AdvisorOffPolicy__alpha_{}__lr_{}".format(
self.exp_params.FIXED_ALPHA, self.exp_params.LR,
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
offpolicy_demo_info = self.offpolicy_demo_defaults(also_using_ppo=True)
fixed_alpha = self.exp_params.FIXED_ALPHA
assert fixed_alpha is not None
if self.task_name == "PoisonedDoors":
offpolicy_advisor_loss = PoisonedDoorsOffPolicyAdvisorLoss(
fixed_alpha=fixed_alpha, fixed_bound=0
)
else:
# MiniGrid Tasks
offpolicy_advisor_loss = MiniGridOffPolicyAdvisorLoss(
fixed_alpha=fixed_alpha, fixed_bound=0
)
return self._training_pipeline(
named_losses={
"ppo_loss": ppo_info["loss"],
"offpolicy_advisor_loss": offpolicy_advisor_loss,
},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
offpolicy_component=OffPolicyPipelineComponent(
data_iterator_builder=offpolicy_demo_info[
"data_iterator_builder"
],
loss_names=["offpolicy_advisor_loss"],
updates=offpolicy_demo_info["offpolicy_updates"],
),
),
],
num_mini_batch=offpolicy_demo_info["ppo_num_mini_batch"],
update_repeats=offpolicy_demo_info["ppo_update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/ppo_with_offpolicy_advisor.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBCTeacherForcing(BaseExperimentConfig):
"""Training with behavior cloning with teacher forcing of 1."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "BC_TEACHER_FORCING__lr_{}".format(self.exp_params.LR)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
loss_info = self.rl_loss_default("imitation")
return self._training_pipeline(
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=training_steps,
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=training_steps,
),
),
],
)
| advisor-main | minigrid_and_pd_experiments/bc_teacher_forcing.py |
from allenact.utils.experiment_utils import PipelineStage, OffPolicyPipelineComponent
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
MiniGridOffPolicyExpertCELoss,
)
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
PoisonedDoorsOffPolicyExpertCELoss,
)
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdPPOWithOffPolicyBC(BaseExperimentConfig):
"""PPO and off policy imitation."""
def extra_tag(self):
return "PPOAndOffPolicyBC__lr_{}".format(self.exp_params.LR)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
ppo_info = self.rl_loss_default("ppo", steps=training_steps)
offpolicy_demo_info = self.offpolicy_demo_defaults(also_using_ppo=True)
if self.task_name == "PoisonedDoors":
offpolicy_expert_ce_loss = PoisonedDoorsOffPolicyExpertCELoss()
else:
# MiniGrid Tasks
offpolicy_expert_ce_loss = MiniGridOffPolicyExpertCELoss()
return self._training_pipeline(
named_losses={
"ppo_loss": ppo_info["loss"],
"offpolicy_expert_ce_loss": offpolicy_expert_ce_loss,
},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
offpolicy_component=OffPolicyPipelineComponent(
data_iterator_builder=offpolicy_demo_info[
"data_iterator_builder"
],
loss_names=["offpolicy_expert_ce_loss"],
updates=offpolicy_demo_info["offpolicy_updates"],
),
),
],
num_mini_batch=offpolicy_demo_info["ppo_num_mini_batch"],
update_repeats=offpolicy_demo_info["ppo_update_repeats"],
)
| advisor-main | minigrid_and_pd_experiments/ppo_with_offpolicy.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdDaggerThenPPO(BaseExperimentConfig):
"""Training with DAgger and then PPO."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "DaggerThenPPO__lr_{}__tf_{}".format(
self.exp_params.LR, self.exp_params.TF_RATIO,
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_dagger_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_ppo_stage = training_steps - steps_dagger_stage
ppo_info = self.rl_loss_default("ppo", steps=steps_ppo_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=steps_dagger_stage,
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=steps_dagger_stage,
),
),
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=steps_ppo_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | minigrid_and_pd_experiments/dagger_then_ppo.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdBCTeacherForcingThenPPO(BaseExperimentConfig):
"""Training with behavior cloning (teacher forcing of 1) and then PPO."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=True, **kwargs)
def extra_tag(self):
return "BCTeacherForcingThenPPO__bc_{}__lr_{}".format(
self.exp_params.TF_RATIO, self.exp_params.LR
)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
steps_imitation_stage = int(training_steps * self.exp_params.TF_RATIO)
steps_ppo_stage = training_steps - steps_imitation_stage
ppo_info = self.rl_loss_default("ppo", steps=steps_ppo_stage)
imitation_info = self.rl_loss_default("imitation")
return self._training_pipeline(
named_losses={
"imitation_loss": imitation_info["loss"],
"ppo_loss": ppo_info["loss"],
},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=steps_imitation_stage,
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=steps_imitation_stage,
),
),
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=steps_ppo_stage,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
)
| advisor-main | minigrid_and_pd_experiments/bc_teacher_forcing_then_ppo.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdPPO(BaseExperimentConfig):
"""Training with PPO."""
def __init__(self, task_name: str, **kwargs):
super().__init__(task_name=task_name, USE_EXPERT=False, **kwargs)
def extra_tag(self):
return "PPO__lr_{}".format(self.exp_params.LR)
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
loss_info = self.rl_loss_default("ppo", steps=training_steps)
return self._training_pipeline(
named_losses={"ppo_loss": loss_info["loss"],},
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
)
| advisor-main | minigrid_and_pd_experiments/ppo.py |
from typing import cast
import gym
from torch import nn
from advisor_losses import GAILDiscriminatorLoss, GAILPPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.sensor import SensorSuite
from allenact.embodiedai.models.basic_models import LinearActorCritic
from allenact.utils.experiment_utils import Builder, PipelineStage
from allenact_plugins.lighthouse_plugin.lighthouse_models import (
LinearAdvisorActorCritic,
)
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from gail_models import MiniGridSimpleConvRNNWithDiscriminator
from poisoneddoors_plugin.poisoneddoors_models import (
RNNActorCriticWithEmbedAndDiscriminator,
)
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
class MgPdGAIL(BaseExperimentConfig):
"""Training with adaptive reweighing."""
USE_EXPERT = False
ALSO_USING_PPO = True
def create_model(self, **kwargs) -> nn.Module:
sensors = self.get_sensors()
task_info = self.task_info()
if task_info["name"] == "PoisonedDoors":
return RNNActorCriticWithEmbedAndDiscriminator(
input_uuid=sensors[0].uuid,
num_embeddings=4,
embedding_dim=128,
input_len=1,
action_space=gym.spaces.Discrete(
3 + task_info["env_info"]["num_doors"]
),
observation_space=SensorSuite(sensors).observation_spaces,
rnn_type=self.exp_params.RNN_TYPE,
head_type=LinearActorCritic
if not self.exp_params.INCLUDE_AUXILIARY_HEAD
else Builder( # type: ignore
LinearAdvisorActorCritic,
kwargs={
"ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
},
),
)
else:
# Model for MiniGrid tasks
return MiniGridSimpleConvRNNWithDiscriminator(
action_space=gym.spaces.Discrete(
len(task_info["task_class"].class_action_names())
),
num_objects=cast(EgocentricMiniGridSensor, sensors[0]).num_objects,
num_colors=cast(EgocentricMiniGridSensor, sensors[0]).num_colors,
num_states=cast(EgocentricMiniGridSensor, sensors[0]).num_states,
observation_space=SensorSuite(sensors).observation_spaces,
hidden_size=128,
rnn_type=self.exp_params.RNN_TYPE,
head_type=LinearActorCritic
if not self.exp_params.INCLUDE_AUXILIARY_HEAD
else Builder( # type: ignore
LinearAdvisorActorCritic,
kwargs={
"ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
},
),
recurrent_discriminator=True,
)
def extra_tag(self):
return f"GAIL__lr_{self.exp_params.LR}"
def training_pipeline(self, **kwargs):
training_steps = self.total_train_steps()
offpolicy_demo_info = self.offpolicy_demo_defaults(
also_using_ppo=False # We do use PPO, but passing False avoids halving the off-policy update repeats.
)
ppo_defaults = self.rl_loss_default("ppo", 1)
gamma = 0.99
use_gae = True
gae_lambda = 1.0
assert ppo_defaults["update_repeats"] % 2 == 0
ppo_update_repeats = ppo_defaults["update_repeats"]
gail_update_repeats = 5 # Default from ikostrikov
gail_warmup_update_repeats = 100 # Default from ikostrikov
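# Warm up the discriminator on roughly 10 rollouts' worth of environment
# steps (never more than the full training budget) before jointly training
# it with the GAIL-PPO policy loss.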
gail_warmup_training_steps = min(
training_steps,
10 * (self.exp_params.NUM_TRAIN_SAMPLERS * self.exp_params.ROLLOUT_STEPS),
)
assert (
gail_warmup_training_steps <= training_steps // 10
) # Don't spend more than 10% of training on warmup
after_warmup_training_steps = training_steps - gail_warmup_training_steps
return self._training_pipeline(
named_losses={
"gail_discriminator_loss": GAILDiscriminatorLoss(
data_iterator_builder=offpolicy_demo_info["data_iterator_builder"],
discriminator_observation_uuid="poisoned_door_state"
if self.task_name == "PoisonedDoors"
else "minigrid_ego_image",
),
"gail_ppo_loss": GAILPPO(
**{
**PPOConfig,
"gamma": gamma,
"use_gae": use_gae,
"gae_lambda": gae_lambda,
"nrollouts": self.exp_params.NUM_TRAIN_SAMPLERS
// ppo_defaults["num_mini_batch"],
"rollout_len": self.exp_params.ROLLOUT_STEPS,
},
),
},
pipeline_stages=[
PipelineStage(
loss_names=["gail_discriminator_loss"], # Warmup
loss_update_repeats=[gail_warmup_update_repeats],
max_stage_steps=gail_warmup_training_steps,
),
PipelineStage(
loss_names=["gail_discriminator_loss", "gail_ppo_loss"],
loss_update_repeats=[gail_update_repeats, ppo_update_repeats],
max_stage_steps=after_warmup_training_steps,
early_stopping_criterion=self.task_info().get(
"early_stopping_criterion"
),
),
],
num_mini_batch=ppo_defaults["num_mini_batch"],
update_repeats=None, # Specified in the pipeline stage
)
| advisor-main | minigrid_and_pd_experiments/gail.py |
import abc
import math
import os
from typing import (
Optional,
List,
Any,
Dict,
cast,
Sequence,
Callable,
Union,
NamedTuple,
)
import gym
import torch
from gym_minigrid.minigrid import Lava, WorldObj, Wall
from torch import nn, optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.embodiedai.models.basic_models import LinearActorCritic
from allenact.utils.experiment_utils import (
LinearDecay,
Builder,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.lighthouse_plugin.lighthouse_models import (
LinearAdvisorActorCritic,
)
from allenact_plugins.minigrid_plugin.minigrid_environments import (
FastCrossing,
AskForHelpSimpleCrossing,
)
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConvRNN
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
create_minigrid_offpolicy_data_iterator,
)
from allenact_plugins.minigrid_plugin.minigrid_sensors import EgocentricMiniGridSensor
from allenact_plugins.minigrid_plugin.minigrid_tasks import (
MiniGridTaskSampler,
MiniGridTask,
AskForHelpSimpleCrossingTask,
)
from poisoneddoors_plugin.poisoneddoors_models import RNNActorCriticWithEmbed
from poisoneddoors_plugin.poisoneddoors_offpolicy import (
create_poisoneddoors_offpolicy_data_iterator,
)
from poisoneddoors_plugin.poisoneddoors_sensors import PoisonedDoorCurrentStateSensor
from poisoneddoors_plugin.poisoneddoors_tasks import (
PoisonedDoorsEnvironment,
PoisonedDoorsTask,
PoisonedDoorsTaskSampler,
)
from projects.advisor.minigrid_constants import MINIGRID_EXPERT_TRAJECTORIES_DIR
class MiniGridAndPDExperimentParams(NamedTuple):
TASK_NAME: str
# Default MiniGrid values
MG_AGENT_VIEW_SIZE: int = 7
MG_AGENT_VIEW_CHANNELS: int = 3
# Default Poisoned Doors values
PD_MAX_STEPS: int = 100
# Training params
NUM_TRAIN_SAMPLERS: int = 20 # if torch.cuda.is_available() else 1
ROLLOUT_STEPS: int = 100
MG_TOTAL_TRAIN_STEPS: int = int(1e6)
PD_TOTAL_TRAIN_STEPS: int = int(3e5)
NUM_TRAIN_TASKS: Optional[int] = None
NUM_TEST_TASKS: int = 1000
GPU_ID: Optional[int] = 1 if torch.cuda.is_available() else None
USE_EXPERT: bool = False
RNN_TYPE: str = "LSTM"
CACHE_GRAPHS: bool = False
SHOULD_LOG: bool = True
TEST_SEED_OFFSET: int = 0
# Hyperparameters
LR: Optional[float] = None
TF_RATIO: Optional[float] = None
FIXED_ALPHA: Optional[float] = None
ALPHA_START: Optional[float] = None
ALPHA_STOP: Optional[float] = None
# Auxiliary head parameters
INCLUDE_AUXILIARY_HEAD: bool = False
SAME_INIT_VALS_FOR_ADVISOR_HEAD: bool = False
# Logging / saving
METRIC_ACCUMULATE_INTERVAL: int = 10000 if torch.cuda.is_available() else 1000
CKPTS_TO_SAVE: int = 4
class BaseExperimentConfig(ExperimentConfig):
"""Base experiment."""
def __init__(self, task_name: str, **kwargs):
self.exp_params = MiniGridAndPDExperimentParams(TASK_NAME=task_name, **kwargs)
@property
def task_name(self):
return self.exp_params.TASK_NAME
def total_train_steps(self) -> int:
task_info = self.task_info()
return task_info["total_train_steps"]
def task_info(self):
"""All information needed about the underlying task.
# Returns
Dictionary of useful information:
- env_info: used to initialize the environment
- tag: string to use for logging
- env_class: callable of the underlying mini-grid / poisoned doors environment class
- task_class: callable of the corresponding task class
"""
name = self.task_name
output_data = dict()
if name == "PoisonedDoors":
# Specific base parameters
num_doors = 4
combination_length = 10
extra_tag = self.extra_tag()
# Parameters needed for other functions
output_data["env_info"] = {
"num_doors": num_doors,
"combination_length": combination_length,
}
output_data["task_sampler_args"] = {
**output_data["env_info"],
"max_steps": self.exp_params.PD_MAX_STEPS,
}
output_data["tag"] = "PoisonedDoorsN{}{}".format(num_doors, extra_tag,)
output_data["env_class"] = PoisonedDoorsEnvironment
output_data["task_class"] = PoisonedDoorsTask
output_data["task_sampler_class"] = PoisonedDoorsTaskSampler
elif name == "CrossingS25N10":
# Specific base parameters
grid_size = 25
num_crossings = 10
obstacle_type: Callable[[], WorldObj] = Lava
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "Crossing{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
output_data["task_sampler_args"] = {
"repeat_failed_task_for_min_steps": 1000
}
output_data["env_class"] = FastCrossing
output_data["task_class"] = MiniGridTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "WallCrossingS25N10":
# Specific base parameters
grid_size = 25
num_crossings = 10
obstacle_type: Callable[[], WorldObj] = Wall
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "Crossing{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
# # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
# # repeat_failed_task_for_min_steps
# output_data["task_sampler_args"] = {
# "repeat_failed_task_for_min_steps": 1000
# }
output_data["env_class"] = FastCrossing
output_data["task_class"] = MiniGridTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "WallCrossingCorruptExpertS25N10":
# Specific base parameters
grid_size = 25
num_crossings = 10
corrupt_expert_within_actions_of_goal = 15
obstacle_type: Callable[[], WorldObj] = Wall
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "WallCrossingCorruptExpert{}S{}N{}C{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
corrupt_expert_within_actions_of_goal,
self.extra_tag(),
)
# # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
# # repeat_failed_task_for_min_steps
output_data["task_sampler_args"] = {
"extra_task_kwargs": {
"corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
}
}
# output_data["task_sampler_args"] = {
# "repeat_failed_task_for_min_steps": 1000
# }
output_data["env_class"] = FastCrossing
output_data["task_class"] = MiniGridTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "LavaCrossingCorruptExpertS15N7":
# Specific base parameters
grid_size = 15
num_crossings = 7
corrupt_expert_within_actions_of_goal = 10
obstacle_type: Callable[[], WorldObj] = Lava
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "LavaCrossingCorruptExpert{}S{}N{}C{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
corrupt_expert_within_actions_of_goal,
self.extra_tag(),
)
# # Each episode takes 4 * 25 * 25 = 2500 steps already, so no need to set
# # repeat_failed_task_for_min_steps
output_data["task_sampler_args"] = {
"extra_task_kwargs": {
"corrupt_expert_within_actions_of_goal": corrupt_expert_within_actions_of_goal
},
"repeat_failed_task_for_min_steps": 1000,
}
output_data["env_class"] = FastCrossing
output_data["task_class"] = MiniGridTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "AskForHelpSimpleCrossing":
# Specific base parameters
grid_size = 15
num_crossings = 7
obstacle_type: Callable[[], WorldObj] = Wall
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "AskForHelpSimpleCrossing{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
# output_data["task_sampler_args"] = {
# "repeat_failed_task_for_min_steps": 1000
# }
output_data["env_class"] = AskForHelpSimpleCrossing
output_data["task_class"] = AskForHelpSimpleCrossingTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "AskForHelpSimpleCrossingOnce":
# Specific base parameters
grid_size = 25
num_crossings = 10
toggle_is_permanent = True
obstacle_type: Callable[[], WorldObj] = Wall
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
"toggle_is_permenant": toggle_is_permanent,
}
output_data["tag"] = "AskForHelpSimpleCrossingOnce{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
output_data["task_sampler_args"] = {
"repeat_failed_task_for_min_steps": 1000
}
output_data["env_class"] = AskForHelpSimpleCrossing
output_data["task_class"] = AskForHelpSimpleCrossingTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "AskForHelpLavaCrossingOnce":
# Specific base parameters
grid_size = 15
num_crossings = 7
toggle_is_permanent = True
obstacle_type: Callable[[], WorldObj] = Lava
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
"toggle_is_permenant": toggle_is_permanent,
}
output_data["tag"] = "AskForHelpLavaCrossingOnce{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
output_data["task_sampler_args"] = {
"repeat_failed_task_for_min_steps": 1000
}
output_data["env_class"] = AskForHelpSimpleCrossing
output_data["task_class"] = AskForHelpSimpleCrossingTask
output_data["task_sampler_class"] = MiniGridTaskSampler
elif name == "AskForHelpLavaCrossingSmall":
# Specific base parameters
grid_size = 9
num_crossings = 4
obstacle_type: Callable[[], WorldObj] = Lava
# Parameters needed for other functions
output_data["env_info"] = {
"size": grid_size,
"num_crossings": num_crossings,
"obstacle_type": obstacle_type,
}
output_data["tag"] = "AskForHelpLavaCrossingSmall{}S{}N{}{}".format(
obstacle_type().__class__.__name__,
grid_size,
num_crossings,
self.extra_tag(),
)
output_data["task_sampler_args"] = {
"repeat_failed_task_for_min_steps": 1000
}
output_data["env_class"] = AskForHelpSimpleCrossing
output_data["task_class"] = AskForHelpSimpleCrossingTask
output_data["task_sampler_class"] = MiniGridTaskSampler
else:
raise NotImplementedError("Haven't implemented {}".format(name))
if name == "PoisonedDoors":
output_data["total_train_steps"] = self.exp_params.PD_TOTAL_TRAIN_STEPS
else:
# MiniGrid total train steps
output_data["total_train_steps"] = self.exp_params.MG_TOTAL_TRAIN_STEPS
output_data["name"] = name
return output_data
def tag(self):
return self.task_info()["tag"]
@abc.abstractmethod
def extra_tag(self):
raise NotImplementedError
def get_sensors(self) -> Sequence[Sensor]:
task_info = self.task_info()
if task_info["name"] == "PoisonedDoors":
action_space = gym.spaces.Discrete(
len(
task_info["task_class"].class_action_names(
num_doors=task_info["env_info"]["num_doors"]
)
)
)
return [PoisonedDoorCurrentStateSensor()] + (
[ExpertActionSensor(action_space=action_space)]
if self.exp_params.USE_EXPERT
else []
)
else:
# Sensors for MiniGrid tasks
action_space = gym.spaces.Discrete(
len(task_info["task_class"].class_action_names())
)
return [
EgocentricMiniGridSensor(
agent_view_size=self.exp_params.MG_AGENT_VIEW_SIZE,
view_channels=self.exp_params.MG_AGENT_VIEW_CHANNELS,
)
] + (
[ExpertActionSensor(action_space=action_space)]
if self.exp_params.USE_EXPERT
else []
)
def machine_params(self, mode="train", gpu_id="default", **kwargs):
if mode == "train":
nprocesses = self.exp_params.NUM_TRAIN_SAMPLERS
elif mode == "valid":
nprocesses = 0
elif mode == "test":
nprocesses = min(
self.exp_params.NUM_TEST_TASKS, 500 if torch.cuda.is_available() else 50
)
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
gpu_ids = [] if self.exp_params.GPU_ID is None else [self.exp_params.GPU_ID]
return MachineParams(nprocesses=nprocesses, devices=gpu_ids)
def create_model(self, **kwargs) -> nn.Module:
sensors = self.get_sensors()
task_info = self.task_info()
if task_info["name"] == "PoisonedDoors":
return RNNActorCriticWithEmbed(
input_uuid=sensors[0].uuid,
num_embeddings=4,
embedding_dim=128,
input_len=1,
action_space=gym.spaces.Discrete(
3 + task_info["env_info"]["num_doors"]
),
observation_space=SensorSuite(sensors).observation_spaces,
rnn_type=self.exp_params.RNN_TYPE,
head_type=LinearActorCritic
if not self.exp_params.INCLUDE_AUXILIARY_HEAD
else Builder( # type: ignore
LinearAdvisorActorCritic,
kwargs={
"ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
},
),
)
else:
# Model for MiniGrid tasks
return MiniGridSimpleConvRNN(
action_space=gym.spaces.Discrete(
len(task_info["task_class"].class_action_names())
),
num_objects=cast(EgocentricMiniGridSensor, sensors[0]).num_objects,
num_colors=cast(EgocentricMiniGridSensor, sensors[0]).num_colors,
num_states=cast(EgocentricMiniGridSensor, sensors[0]).num_states,
observation_space=SensorSuite(sensors).observation_spaces,
hidden_size=128,
rnn_type=self.exp_params.RNN_TYPE,
head_type=LinearActorCritic
if not self.exp_params.INCLUDE_AUXILIARY_HEAD
else Builder( # type: ignore
LinearAdvisorActorCritic,
kwargs={
"ensure_same_init_aux_weights": self.exp_params.SAME_INIT_VALS_FOR_ADVISOR_HEAD
},
),
)
def make_sampler_fn(
self, **kwargs
) -> Union[PoisonedDoorsTaskSampler, MiniGridTaskSampler]:
return self.task_info()["task_sampler_class"](**kwargs)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
info = self.task_info()
if info["name"] == "PoisonedDoors":
args_dict = {
"sensors": self.get_sensors(),
"env_class": info.get("env_class"),
"env_info": info.get("env_info"),
"task_class": info["task_class"],
}
else:
args_dict = {
"sensors": self.get_sensors(),
"env_class": info.get("env_class"),
"env_info": info.get("env_info"),
"cache_graphs": self.exp_params.CACHE_GRAPHS,
"task_class": info["task_class"],
}
if "task_sampler_args" in info:
args_dict.update(info["task_sampler_args"])
if self.exp_params.NUM_TRAIN_TASKS:
args_dict["max_tasks"] = self.exp_params.NUM_TRAIN_TASKS
return args_dict
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
raise RuntimeError
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
max_tasks = self.exp_params.NUM_TEST_TASKS // total_processes + (
process_ind < (self.exp_params.NUM_TEST_TASKS % total_processes)
)
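# As in the lighthouse experiments: deterministic, per-process disjoint
# task seeds for reproducible evaluation.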
task_seeds_list = [
2 ** 31
- 1
+ self.exp_params.TEST_SEED_OFFSET
+ process_ind
+ total_processes * i
for i in range(max_tasks)
]
assert min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
train_sampler_args = self.train_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
if "repeat_failed_task_for_min_steps" in train_sampler_args:
del train_sampler_args["repeat_failed_task_for_min_steps"]
return {
**train_sampler_args,
"task_seeds_list": task_seeds_list,
"max_tasks": max_tasks,
"deterministic_sampling": True,
"sensors": [
s for s in train_sampler_args["sensors"] if "Expert" not in str(type(s))
],
}
def offpolicy_demo_defaults(self, also_using_ppo: bool):
ppo_defaults = self.rl_loss_default("ppo", 1)
assert ppo_defaults["update_repeats"] % 2 == 0
output_data = {}
task_info = self.task_info()
if task_info["name"] == "PoisonedDoors":
output_data.update(
{
"data_iterator_builder": lambda: create_poisoneddoors_offpolicy_data_iterator(
num_doors=task_info["env_info"]["num_doors"],
nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
// ppo_defaults["num_mini_batch"],
rollout_len=self.exp_params.ROLLOUT_STEPS,
dataset_size=task_info["total_train_steps"],
),
}
)
else:
# Off-policy defaults for MiniGrid tasks
output_data.update(
{
"data_iterator_builder": lambda: create_minigrid_offpolicy_data_iterator(
path=os.path.join(
MINIGRID_EXPERT_TRAJECTORIES_DIR,
"MiniGrid-{}-v0{}.pkl".format(task_info["name"], "",),
),
nrollouts=self.exp_params.NUM_TRAIN_SAMPLERS
// ppo_defaults["num_mini_batch"],
rollout_len=self.exp_params.ROLLOUT_STEPS,
instr_len=None,
restrict_max_steps_in_dataset=task_info["total_train_steps"],
),
}
)
# Off-policy defaults common to Poisoned Doors and MiniGrid tasks
output_data.update(
{
"ppo_update_repeats": ppo_defaults["update_repeats"] // 2
if also_using_ppo
else 0,
"ppo_num_mini_batch": ppo_defaults["num_mini_batch"]
if also_using_ppo
else 0,
"offpolicy_updates": ppo_defaults["num_mini_batch"]
* (
ppo_defaults["update_repeats"] // 2
if also_using_ppo
else ppo_defaults["update_repeats"]
),
}
)
return output_data
def rl_loss_default(self, alg: str, steps: Optional[int] = None):
if alg == "ppo":
assert steps is not None
return {
"loss": (PPO(clip_decay=LinearDecay(steps), **PPOConfig)),
"num_mini_batch": 2,
"update_repeats": 4,
}
elif alg == "a2c":
return {
"loss": A2C(**A2CConfig),
"num_mini_batch": 1,
"update_repeats": 1,
}
elif alg == "imitation":
return {
"loss": Imitation(),
"num_mini_batch": 2, # if torch.cuda.is_available() else 1,
"update_repeats": 4,
}
else:
raise NotImplementedError
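    # Example (illustrative): `rl_loss_default("ppo", steps=int(1e6))` yields a
    # PPO loss whose clipping parameter decays linearly over 1e6 steps, together
    # with `num_mini_batch=2` and `update_repeats=4`.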
def _training_pipeline(
self,
named_losses: Dict[str, Union[Loss, Builder]],
pipeline_stages: List[PipelineStage],
num_mini_batch: int,
update_repeats: Optional[int],
):
# When using many mini-batches or update repeats, decrease the learning
# rate so that the approximate size of the gradient update is similar.
lr = self.exp_params.LR
num_steps = self.exp_params.ROLLOUT_STEPS
metric_accumulate_interval = self.exp_params.METRIC_ACCUMULATE_INTERVAL
gamma = 0.99
use_gae = "reinforce_loss" not in named_losses
gae_lambda = 1.0
max_grad_norm = 0.5
total_train_steps = self.task_info()["total_train_steps"]
if self.exp_params.CKPTS_TO_SAVE == 0:
save_interval = None
else:
save_interval = math.ceil(total_train_steps / self.exp_params.CKPTS_TO_SAVE)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses=named_losses,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=None,
should_log=self.exp_params.SHOULD_LOG,
pipeline_stages=pipeline_stages,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)} # type: ignore
),
)
| advisor-main | minigrid_and_pd_experiments/base.py |
import typing
from typing import Dict, Union, Tuple, Iterator, Any
from typing import Optional
import numpy as np
import torch
from gym.utils import seeding
from advisor_losses import AlphaScheduler, AdvisorWeightedStage
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.base_abstractions.misc import Memory
_DATASET_CACHE: Dict[str, Any] = {}
class PoisonedDoorsOffPolicyExpertCELoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(self, total_episodes_in_epoch: Optional[int] = None):
super().__init__()
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
*args,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
        rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {}
for k in ["poisoned_door_state"]:
if k in batch:
observations[k] = batch[k].view(
rollout_len, nrollouts, *batch[k].shape[2:]
)
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"],
)
expert_ce_loss = -ac_out.distributions.log_prob(
batch["expert_action"].view(rollout_len, nrollouts, 1)
).mean()
info = {"expert_ce": expert_ce_loss.item()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return expert_ce_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsOffPolicyAdvisorLoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(
self,
total_episodes_in_epoch: Optional[int] = None,
fixed_alpha: Optional[float] = 1,
fixed_bound: Optional[float] = 0.0,
alpha_scheduler: AlphaScheduler = None,
smooth_expert_weight_decay: Optional[float] = None,
*args,
**kwargs
):
super().__init__()
self.advisor_loss = AdvisorWeightedStage(
rl_loss=None,
fixed_alpha=fixed_alpha,
fixed_bound=fixed_bound,
alpha_scheduler=alpha_scheduler,
smooth_expert_weight_decay=smooth_expert_weight_decay,
*args,
**kwargs
)
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
step_count: int,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {"poisoned_door_state": batch["poisoned_door_state"]}
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"].view(rollout_len, nrollouts, -1),
)
total_loss, losses_dict = self.advisor_loss.loss(
step_count=step_count,
batch={
"observations": {
"expert_action": torch.cat(
(
batch["expert_action"].view(rollout_len, nrollouts, 1),
torch.ones(rollout_len, nrollouts, 1, dtype=torch.int64).to(
batch["expert_action"].device
),
),
dim=-1,
)
}
},
actor_critic_output=ac_out,
)
info = {"offpolicy_" + key: val for key, val in losses_dict.items()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return total_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsExpertTrajectoryIterator(Iterator):
def __init__(
self, num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
):
super(PoisonedDoorsExpertTrajectoryIterator, self).__init__()
self.np_seeded_random_gen, _ = typing.cast(
Tuple[np.random.RandomState, Any], seeding.np_random(0)
)
self.ndoors = num_doors
self.nrollouts = nrollouts
self.rollout_len = rollout_len
self.dataset_size = dataset_size
self.initial_observations = np.zeros(
(rollout_len, nrollouts, 1), dtype=np.int64
)
self.mask = np.zeros((rollout_len, nrollouts, 1), dtype=np.float32)
        # Draw expert actions from the seeded generator created above so the
        # expert "dataset" is reproducible across runs.
        self.expert_actions = self.np_seeded_random_gen.randint(
            4, 3 + num_doors, size=(self.dataset_size, 1)
        )
self.current_ind = 0
def __next__(self) -> Dict[str, torch.Tensor]:
start = self.current_ind
end = self.current_ind + self.nrollouts * self.rollout_len
if end > self.dataset_size:
raise StopIteration()
self.current_ind = end
return {
"masks": torch.from_numpy(self.mask),
"poisoned_door_state": torch.from_numpy(self.initial_observations),
"expert_action": torch.from_numpy(
self.expert_actions[start:end].reshape(
(self.rollout_len, self.nrollouts)
)
),
}
def create_poisoneddoors_offpolicy_data_iterator(
num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
) -> PoisonedDoorsExpertTrajectoryIterator:
return PoisonedDoorsExpertTrajectoryIterator(
num_doors=num_doors,
nrollouts=nrollouts,
rollout_len=rollout_len,
dataset_size=dataset_size,
)
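# Minimal usage sketch (illustrative values): each `next()` call yields tensors
# shaped (rollout_len, nrollouts, ...) until `dataset_size` expert actions have
# been consumed, after which StopIteration is raised.
#
#   it = create_poisoneddoors_offpolicy_data_iterator(
#       num_doors=4, nrollouts=2, rollout_len=8, dataset_size=64,
#   )
#   batch = next(it)  # batch["expert_action"].shape == (8, 2)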
| advisor-main | poisoneddoors_plugin/poisoneddoors_offpolicy.py |
import typing
from typing import Dict, Tuple, Any, Union
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.base_abstractions.misc import ActorCriticOutput, DistributionType, Memory
from allenact.embodiedai.models.basic_models import RNNActorCritic, LinearActorCritic
from allenact.utils.misc_utils import prepare_locals_for_super
from gail_models import PoisonedDoorsDiscriminatorRNN
class RNNActorCriticWithEmbed(RNNActorCritic):
def __init__(
self,
input_uuid: str,
num_embeddings: int,
embedding_dim: int,
input_len: int,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_layers: int = 1,
rnn_type: str = "GRU",
head_type=LinearActorCritic,
):
hidden_size = embedding_dim * input_len
super().__init__(
input_uuid=input_uuid,
action_space=action_space,
observation_space=SpaceDict(
{
input_uuid: gym.spaces.Box(
-float("inf"), float("inf"), shape=(hidden_size,)
)
}
),
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
head_type=head_type,
)
self.initial_embedding = nn.Embedding(
num_embeddings=num_embeddings, embedding_dim=embedding_dim
)
def forward( # type: ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
**kwargs,
) -> Tuple[ActorCriticOutput[DistributionType], Any]:
input_obs = observations[self.input_uuid]
obs = typing.cast(
Dict[str, torch.FloatTensor],
{
self.input_uuid: self.initial_embedding(input_obs).view(
*input_obs.shape[:2], -1
)
},
)
return super(RNNActorCriticWithEmbed, self).forward(
observations=obs, memory=memory, prev_actions=prev_actions, masks=masks,
)
class RNNActorCriticWithEmbedAndDiscriminator(RNNActorCriticWithEmbed):
def __init__(
self,
input_uuid: str,
num_embeddings: int,
embedding_dim: int,
input_len: int,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_layers: int = 1,
rnn_type: str = "GRU",
head_type=LinearActorCritic,
):
super(RNNActorCriticWithEmbedAndDiscriminator, self).__init__(
**prepare_locals_for_super(locals())
)
self.discriminator = PoisonedDoorsDiscriminatorRNN(
input_uuid=input_uuid,
num_action_embeddings=action_space.n,
num_observation_embeddings=num_embeddings,
embedding_dim=embedding_dim,
classifier_hidden_dim=128,
)
self.train()
def forward(
self,
observations: Dict[str, Any],
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
**kwargs,
):
out, memory = super(RNNActorCriticWithEmbedAndDiscriminator, self).forward(
observations=observations,
memory=memory,
prev_actions=prev_actions,
masks=masks,
)
out.extras["discriminator"] = self.discriminator
return out, memory
| advisor-main | poisoneddoors_plugin/poisoneddoors_models.py |
from typing import Optional, Any
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.misc_utils import prepare_locals_for_super
from poisoneddoors_plugin.poisoneddoors_tasks import (
PoisonedDoorsEnvironment,
PoisonedDoorsTask,
PoisonedEnvStates,
)
class PoisonedDoorCurrentStateSensor(
Sensor[PoisonedDoorsEnvironment, PoisonedDoorsTask]
):
def __init__(self, uuid: str = "poisoned_door_state", **kwargs: Any):
self.nstates = len(PoisonedEnvStates)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
        return gym.spaces.Box(low=0, high=self.nstates - 1, shape=(1,), dtype=int)
def get_observation(
self,
env: PoisonedDoorsEnvironment,
task: Optional[PoisonedDoorsTask],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
return np.array([int(env.current_state.value)])
| advisor-main | poisoneddoors_plugin/poisoneddoors_sensors.py |
advisor-main | poisoneddoors_plugin/__init__.py |
|
import random
from enum import Enum
from typing import Any, Tuple, Union, List, Optional, Dict
import gym
import numpy as np
from gym.utils import seeding
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import SensorSuite, Sensor
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.experiment_utils import set_seed
from allenact.utils.system import get_logger
def get_combination(nactions: int, combination_length: int):
s = random.getstate()
random.seed(combination_length)
comb = [random.randint(0, nactions - 1) for _ in range(combination_length)]
random.setstate(s)
return comb
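# Example: `get_combination(nactions=3, combination_length=4)` returns the same
# length-4 list of action indices in {0, 1, 2} on every call (the global
# `random` state is saved and restored around the draw).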
class PoisonedEnvStates(Enum):
choosing_door = 0
entering_pass_start = 1
entering_pass_cont = 2
done = 3
class PoisonedDoorsEnvironment(object):
def __init__(self, num_doors: int, combination_length: int):
self.num_doors = num_doors
self.combination_length = combination_length
self.combination = get_combination(
nactions=3, combination_length=self.combination_length
)
self.combination_index = 0
self.max_comb_index = 0
self.current_state = PoisonedEnvStates.choosing_door
self.chosen_door: Optional[int] = None
self.good_door_ind: Optional[int] = None
@classmethod
def class_action_names(cls, num_doors: int):
return ("c0", "c1", "c2") + tuple(str(i) for i in range(num_doors))
def action_names(self):
return self.class_action_names(num_doors=self.num_doors)
def reset(self, door_ind: int):
assert 1 <= door_ind < self.num_doors
self.good_door_ind = door_ind
# print(self.good_door_ind)
self.chosen_door = None
self.current_state = PoisonedEnvStates.choosing_door
self.combination_index = 0
self.max_comb_index = 0
def is_done(self):
return self.current_state == PoisonedEnvStates.done
def step(self, action: int) -> float:
        if action < 3 or self.current_state != PoisonedEnvStates.choosing_door:
if self.chosen_door is None:
return 0.0
else:
assert self.chosen_door == 0, "Stepping when done."
correct = self.combination[self.combination_index] == action
if correct:
self.combination_index += 1
self.max_comb_index = max(
self.combination_index, self.max_comb_index
)
else:
self.combination_index = 0
                if correct:
                    self.current_state = PoisonedEnvStates.entering_pass_cont
                else:
                    self.current_state = PoisonedEnvStates.done
if self.combination_index >= len(self.combination):
self.current_state = PoisonedEnvStates.done
return 1.0
return 0.0
elif action == 3:
self.chosen_door = 0
self.combination_index = 0
self.current_state = PoisonedEnvStates.entering_pass_start
return 0.0
else:
self.current_state = PoisonedEnvStates.done
self.chosen_door = action - 3
return 2.0 * (1 if self.good_door_ind == action - 3 else -1)
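# Minimal interaction sketch (illustrative):
#   env = PoisonedDoorsEnvironment(num_doors=4, combination_length=10)
#   env.reset(door_ind=2)  # door 2 is the good door; door 0 hides the lock
#   env.step(action=5)     # open door index 5 - 3 = 2 -> reward +2.0, done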
class PoisonedDoorsTask(Task[PoisonedDoorsEnvironment]):
"""Defines an abstract embodied task in the light house gridworld.
# Attributes
env : The PoisonedDoorsEnvironment object.
sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer.
task_info : Dictionary of (k, v) pairs defining task goals and other task information.
max_steps : The maximum number of steps an agent can take an in the task before it is considered failed.
observation_space: The observation space returned on each step from the sensors.
"""
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
pass
@property
def action_space(self) -> gym.spaces.Discrete:
return gym.spaces.Discrete(len(self.env.action_names()))
def _step(self, action: int) -> RLStepResult:
reward = self.env.step(action)
return RLStepResult(
observation=self.get_observations(),
reward=reward,
done=self.env.is_done(),
info=None,
)
def reached_terminal_state(self) -> bool:
return self.env.is_done()
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return PoisonedDoorsEnvironment.class_action_names(**kwargs)
def action_names(self) -> Tuple[str, ...]:
return self.env.action_names()
def close(self) -> None:
pass
def query_expert(self, **kwargs) -> Tuple[Any, bool]:
if self.env.current_state == PoisonedEnvStates.done:
get_logger().warning("Trying to query expert with done task.")
return (-1, False)
elif self.env.current_state == PoisonedEnvStates.choosing_door:
return (3 + self.env.good_door_ind, True)
else:
return (self.env.combination[self.env.combination_index], True)
def metrics(self) -> Dict[str, Any]:
metrics = super(PoisonedDoorsTask, self).metrics()
for i in range(self.env.num_doors):
metrics["chose_door_{}".format(i)] = 1.0 * (self.env.chosen_door == i)
metrics["chose_no_door"] = 1.0 * (self.env.chosen_door is None)
metrics["chose_good_door"] = self.env.chosen_door == self.env.good_door_ind
metrics["opened_lock"] = 1.0 * (
self.env.max_comb_index == self.env.combination_length
)
metrics["success"] = metrics["opened_lock"] or metrics["chose_good_door"]
if self.env.chosen_door == 0:
metrics["max_comb_correct"] = float(1.0 * self.env.max_comb_index)
return metrics
class PoisonedDoorsTaskSampler(TaskSampler):
def __init__(
self,
num_doors: int,
combination_length: int,
sensors: Union[SensorSuite, List[Sensor]],
max_steps: int,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
seed: Optional[int] = None,
**kwargs
):
self.env = PoisonedDoorsEnvironment(
num_doors=num_doors, combination_length=combination_length
)
self._last_sampled_task: Optional[PoisonedDoorsTask] = None
self.sensors = (
SensorSuite(sensors) if not isinstance(sensors, SensorSuite) else sensors
)
self.max_steps = max_steps
self.max_tasks = max_tasks
self.num_tasks_generated = 0
self.deterministic_sampling = deterministic_sampling
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
self.seed: int = int(
seed if seed is not None else np.random.randint(0, 2 ** 31 - 1)
)
self.np_seeded_random_gen: Optional[np.random.RandomState] = None
self.set_seed(self.seed)
@property
def num_doors(self):
return self.env.num_doors
@property
def combination_length(self):
return self.env.combination_length
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
n = self.num_doors
return n if self.num_unique_seeds is None else min(n, self.num_unique_seeds)
@property
def last_sampled_task(self) -> Optional[Task]:
return self._last_sampled_task
def next_task(self, force_advance_scene: bool = False) -> Optional[Task]:
if self.length <= 0:
return None
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
seed = self.np_seeded_random_gen.choice(self.task_seeds_list)
else:
seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
self.num_tasks_generated += 1
self.env.reset(door_ind=1 + (seed % (self.num_doors - 1)))
return PoisonedDoorsTask(
env=self.env, sensors=self.sensors, task_info={}, max_steps=self.max_steps
)
def close(self) -> None:
pass
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.set_seed(seed=self.seed)
def set_seed(self, seed: int) -> None:
set_seed(seed)
self.np_seeded_random_gen, _ = seeding.np_random(seed)
self.seed = seed
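# Minimal sampling sketch (illustrative): deterministically draws `max_tasks`
# tasks, cycling over `num_unique_seeds` seeds.
#   sampler = PoisonedDoorsTaskSampler(
#       num_doors=4, combination_length=10, sensors=[], max_steps=100,
#       max_tasks=5, num_unique_seeds=5, deterministic_sampling=True,
#   )
#   task = sampler.next_task()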
| advisor-main | poisoneddoors_plugin/poisoneddoors_tasks.py |
import argparse
import glob
import multiprocessing as mp
import os
mp = mp.get_context("forkserver")
from projects.advisor.summarization_utils import create_comparison_hp_plots_from_tsv
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="Random HP Search",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--dir",
type=str,
default="./hp_runs",
help="directory in which the result tsv files were saved (default: './hp_runs')",
)
parser.add_argument(
"--env_type",
type=str,
default="",
help="which task to generate plots for, defaults to generating plots for all tasks.",
)
return parser
if __name__ == "__main__":
"""Summarize the information saved via `minigrid_random_hp_search` and save
E[max()] plots. This script would summarize all tsv files saves in the
`dir` directory, unless a `--env_name` flag directs it to a particular
environment.
Run this with the following commmand:
`python projects/advisor/minigrid_and_pd_scripts/summarize_random_hp_search.py`
"""
args = get_argument_parser().parse_args()
dir = args.dir
env_type = args.env_type
# env_type = "AskForHelpSimpleCrossingOnce"
# env_type = "PoisonedDoors"
# env_type = "AskForHelpSimpleCrossingOnce"
# env_type = "LavaCrossingCorruptExpertS15N7"
highlight_best = True
hide_labels = True
overwrite = True
paths = glob.glob(os.path.join(dir, "random_*.tsv"))
paths = [p for p in paths if env_type.lower() in os.path.basename(p).lower()]
processes = []
for path in paths:
print()
print(os.path.basename(path))
kwargs = dict(
num_hp_evals_for_steps_plot=10,
tsv_file_path=path,
overwrite=overwrite,
include_legend=False,
highlight_best=highlight_best,
hide_labels=hide_labels,
)
if len(paths) == 1:
create_comparison_hp_plots_from_tsv(**kwargs)
else:
p = mp.Process(target=create_comparison_hp_plots_from_tsv, kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
| advisor-main | minigrid_and_pd_scripts/summarize_random_hp_search.py |
import json
import os
import time
import typing
import babyai
import blosc
import torch
import torch.multiprocessing as mp
from tqdm import tqdm
from allenact.main import load_config, get_argument_parser
from allenact.utils.misc_utils import partition_sequence
from allenact.utils.system import get_logger
from allenact_plugins.minigrid_plugin.minigrid_tasks import MiniGridTaskSampler
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
mp = mp.get_context("forkserver")
import queue
from setproctitle import setproctitle as ptitle
import numpy as np
def collect_demos(
process_id: int, args, input_queue: mp.Queue, output_queue: mp.Queue,
):
"""Saves a collection of training demos."""
ptitle("({}) Demo Saver".format(process_id))
output_data_list = []
try:
cfg: BaseExperimentConfig
config_kwargs = json.loads(
"{}" if args.config_kwargs is None else args.config_kwargs
)
config_kwargs["MG_AGENT_VIEW_CHANNELS"] = 3
config_kwargs["task_name"] = args.env_name
args.config_kwargs = json.dumps(config_kwargs)
cfg, _ = load_config(args) # type: ignore
wait_episodes = 100 # if torch.cuda.is_available() else 1
task_sampler_args = cfg.train_task_sampler_args(
process_ind=0, total_processes=0,
)
task_sampler = typing.cast(
MiniGridTaskSampler,
cfg.make_sampler_fn(
**{
**task_sampler_args,
"task_seeds_list": ["UNDEFINED"],
"deterministic_sampling": True,
"repeat_failed_task_for_min_steps": 0,
}
),
)
while True:
seeds = input_queue.get(timeout=1)
for seed in seeds:
task_sampler.task_seeds_list[0] = seed
task = task_sampler.next_task()
images = []
actions = []
directions = []
def append_values():
assert not task.is_done()
obs = task.get_observations()
images.append(obs["minigrid_ego_image"])
actions.append(int(obs["expert_action"].reshape(-1)[0]))
directions.append(task.env.agent_dir)
while not task.is_done():
append_values()
task.step(action=actions[-1])
output_data_list.append(
{
"seed": seed,
"images": blosc.pack_array(np.array(images)),
"actions": actions,
"directions": directions,
}
)
if len(output_data_list) >= wait_episodes:
output_queue.put(output_data_list)
# print(
# sum(len(od["actions"]) for od in output_data_list)
# / len(output_data_list)
# )
output_data_list = []
except queue.Empty:
if len(output_data_list) != 0:
output_queue.put(output_data_list)
get_logger().info("Queue empty for worker {}, exiting.".format(process_id))
def create_demos(args, nprocesses: int, min_demos: int):
assert args.experiment in ["", "bc"], "`--experiment` must be either empty or 'bc'."
assert os.path.relpath(args.output_dir) != ""
task_name = args.env_name
ptitle("Master (DEMOs {})".format(" and ".join(task_name)))
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
demos_save_path = os.path.join(output_dir, "MiniGrid-{}-v0.pkl".format(task_name))
if os.path.exists(demos_save_path):
demos_list = babyai.utils.load_demos(demos_save_path)
if len(demos_list) > min_demos:
min_demos = len(demos_list)
demos_list.extend([None] * (min_demos - len(demos_list)))
remaining_seeds = set(i for i, d in enumerate(demos_list) if d is None)
else:
demos_list = [None] * min_demos
remaining_seeds = set(range(min_demos))
if len(remaining_seeds) == 0:
print(f"No more demos to save for task {task_name}")
return len(demos_list), sum([len(dl[3]) for dl in demos_list])
print(f"Beginning to save demos with {len(remaining_seeds)} remaining")
input_queue = mp.Queue()
for seeds in partition_sequence(
list(remaining_seeds), min(2 ** 15 - 1, len(remaining_seeds))
):
# Annoyingly a mp.Queue can hold a max of 2**15 - 1 items so we have to do this hack
input_queue.put(seeds)
output_queue = mp.Queue()
processes = []
for i in range(min(nprocesses, len(remaining_seeds))):
processes.append(
mp.Process(
target=collect_demos,
kwargs=dict(
process_id=i,
args=args,
input_queue=input_queue,
output_queue=output_queue,
),
)
)
processes[-1].start()
time.sleep(0.1)
with tqdm(total=len(remaining_seeds)) as pbar:
total_demos_created = sum(d is not None for d in demos_list)
while len(remaining_seeds) != 0:
try:
run_data_list = output_queue.get(timeout=60)
for run_data in run_data_list:
remaining_seeds.remove(run_data["seed"])
demos_list[run_data["seed"]] = (
"",
run_data["images"],
run_data["directions"],
run_data["actions"],
)
total_demos_created += 1
if total_demos_created % 10000 == 0:
babyai.utils.save_demos(demos_list, demos_save_path)
pbar.update(1)
except queue.Empty as _:
print("No demo saved for 60 seconds")
babyai.utils.save_demos(demos_list, demos_save_path)
for p in processes:
try:
p.join(1)
except Exception as _:
pass
print("Single stage of saving data is done!")
return len(demos_list), sum([len(dl[3]) for dl in demos_list])
if __name__ == "__main__":
"""Run this with the following command (from the package's root):
Command:
python minigrid_and_pd_scripts/save_expert_demos.py bc \
-b minigrid_and_pd_experiments/ \
-o minigrid_data/minigrid_demos \
--env_name CrossingS25N10
Generate all the commands:
```python
ns = [
"CrossingS25N10",
"WallCrossingS25N10",
"WallCrossingCorruptExpertS25N10",
"LavaCrossingCorruptExpertS15N7",
"AskForHelpSimpleCrossing",
"AskForHelpSimpleCrossingOnce",
"AskForHelpLavaCrossingOnce",
"AskForHelpLavaCrossingSmall",
]
s = "python minigrid_and_pd_scripts/save_expert_demos.py bc -b minigrid_and_pd_experiments/ -o minigrid_data/minigrid_demos --env_name {}"
cmd = " ; ".join([s.format(n) for n in ns])
print(cmd)
```
"""
parser = get_argument_parser()
parser.add_argument(
"--env_name", type=str, required=True,
)
args = parser.parse_args()
initial_processes = min(6 if not torch.cuda.is_available() else 10, mp.cpu_count())
nprocesses = min(6 if not torch.cuda.is_available() else 56, mp.cpu_count())
min_demos = int(20)
count = 0
while count < int(1e6):
min_demos, count = create_demos(
args,
nprocesses=initial_processes if count == 0 else nprocesses,
min_demos=min_demos,
)
print(f"{count} frames saved so far.")
min_demos = max(int(1e6 / (count / min_demos)), min_demos) + 100
print("Saving explore combination data is done!")
| advisor-main | minigrid_and_pd_scripts/save_expert_demos.py |
advisor-main | minigrid_and_pd_scripts/__init__.py |
|
import json
import os
from typing import cast, Dict
import tqdm
from advisor_constants import ADVISOR_TOP_LEVEL_DIR
from allenact.main import get_argument_parser, load_config
from allenact.utils.experiment_utils import set_seed, ScalarMeanTracker
from minigrid_and_pd_experiments.base import BaseExperimentConfig
TASK_TO_RANDOM_PERFORMANCE = {
"PoisonedDoors": {
"ep_length": 2.091,
"reward": -0.464,
"chose_door_0": 0.254,
"chose_door_1": 0.242,
"chose_door_2": 0.25,
"chose_door_3": 0.254,
"chose_no_door": 0.0,
"chose_good_door": 0.257,
"opened_lock": 0.0,
"success": 0.257,
"max_comb_correct": 0.2125984251968504,
},
"CrossingS25N10": {"ep_length": 25.908, "reward": 0.0, "success": 0.0},
"WallCrossingS25N10": {"ep_length": 2466.98, "reward": 0.0168872, "success": 0.05},
"WallCrossingCorruptExpertS25N10": {
"ep_length": 2463.183,
"reward": 0.018654119999999996,
"success": 0.054,
},
"LavaCrossingCorruptExpertS15N7": {
"ep_length": 19.317,
"reward": 0.0,
"success": 0.0,
},
"AskForHelpSimpleCrossing": {
"ep_length": 882.099,
"reward": 0.024601,
"explored_count": 20.073,
"final_distance": 11.309,
"success": 0.067,
"toggle_percent": 0.2505814965872374,
},
"AskForHelpSimpleCrossingOnce": {
"ep_length": 2484.158,
"reward": 0.008303119999999999,
"explored_count": 45.412,
"final_distance": 22.958,
"success": 0.026,
"toggle_percent": 0.2500501483796506,
},
"AskForHelpLavaCrossingOnce": {
"ep_length": 26.422,
"reward": 0.0,
"explored_count": 3.952,
"final_distance": 21.539,
"success": 0.0,
"toggle_percent": 0.2231268780071966,
},
"AskForHelpLavaCrossingSmall": {
"ep_length": 19.678,
"reward": 0.0,
"explored_count": 3.345,
"final_distance": 9.904,
"success": 0.0,
"toggle_percent": 0.20499024899878812,
},
}
_TASK_NAMES = [
"PoisonedDoors",
"CrossingS25N10",
"WallCrossingS25N10",
"WallCrossingCorruptExpertS25N10",
"LavaCrossingCorruptExpertS15N7",
"AskForHelpSimpleCrossing",
"AskForHelpSimpleCrossingOnce",
"AskForHelpLavaCrossingOnce",
"AskForHelpLavaCrossingSmall",
]
if __name__ == "__main__":
for task_name in _TASK_NAMES:
config_kwargs = {"task_name": task_name}
exp_path = os.path.join(
ADVISOR_TOP_LEVEL_DIR, "minigrid_and_pd_experiments/bc.py"
)
args_list = [
exp_path,
"--config_kwargs",
json.dumps(config_kwargs),
]
parser = get_argument_parser()
args = parser.parse_args(args=args_list)
cfg: BaseExperimentConfig = cast(BaseExperimentConfig, load_config(args)[0])
test_sampler_kwargs = cfg.test_task_sampler_args(
process_ind=0, total_processes=1, seeds=[0]
)
task_sampler = cfg.make_sampler_fn(**test_sampler_kwargs)
metrics_list = []
means_tracker = ScalarMeanTracker()
k = 0
print(f"Starting random performance test for {task_name}")
pbar = tqdm.tqdm(total=cfg.exp_params.NUM_TEST_TASKS)
while True:
set_seed(k)
k += 1
task = task_sampler.next_task()
if task is None:
break
while not task.is_done():
task.step(action=task.action_space.sample())
metrics_list.append(task.metrics())
means_tracker.add_scalars(
{k: v for k, v in metrics_list[-1].items() if not isinstance(v, Dict)}
)
pbar.update(1)
pbar.close()
print()
print(f"Random performance for {task_name}:")
print(dict(means_tracker.means()))
print("\n")
| advisor-main | minigrid_and_pd_scripts/compute_random_performance_for_task.py |
import itertools
import json
import math
import os
import queue
import time
import warnings
from typing import Dict, List, Optional, Any
import canonicaljson
import torch
import torch.multiprocessing as mp
from allenact.algorithms.onpolicy_sync.runner import OnPolicyRunner
from allenact.main import init_logging, load_config, get_argument_parser
from allenact.utils.misc_utils import rand_float
from allenact.utils.system import get_logger, update_log_level
from projects.advisor.minigrid_and_pd_experiments.base import BaseExperimentConfig
from projects.advisor.minigrid_constants import (
MINIGRID_ENV_NAMES_SUPPORTED,
demos_exist_for_env,
)
mp = mp.get_context("forkserver")
from setproctitle import setproctitle as ptitle
import pandas as pd
import numpy as np
def generate_random_lrs_tf_ratio_and_alphas(nsamples: int):
np.random.seed(1)
lr_samples = np.exp(rand_float(math.log(1e-4), math.log(0.5), nsamples))
np.random.seed(2)
tf_ratios = rand_float(0.1, 0.9, nsamples)
np.random.seed(3)
fixed_alphas = [np.random.choice([4.0, 8.0, 16.0, 32.0]) for _ in range(nsamples)]
return lr_samples, tf_ratios, fixed_alphas
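# Learning rates are sampled log-uniformly in [1e-4, 0.5], teacher-forcing
# ratios uniformly in [0.1, 0.9], and the fixed ADVISOR alpha from
# {4, 8, 16, 32}; the three independent seeds keep each draw reproducible.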
def iteratively_run_experiments(
process_id: int,
gpu_id: Optional[int],
args,
input_queue: mp.Queue,
output_queue: mp.Queue,
log_level: str,
test_seed_offset: int = 0,
):
"""Iteratively train and test explore/combination models under different
training regimes.
This function is very similar to the `iteratively_run_lighthouse_experiments` function except
that rather than training with different levels of supervision, here we only have one
    level of supervision and instead it's the training regime (e.g. PPO vs. DAgger) that is
allowed to change based on the values in the `input_queue`.
See `iteratively_run_lighthouse_experiments` for detailed documentation.
"""
ptitle("({}) Create Iterative Experiment Runner".format(process_id))
init_logging(log_level)
def log_info(msg):
update_log_level(logger=get_logger(), human_log_level="info")
get_logger().info(msg)
update_log_level(logger=get_logger(), human_log_level=log_level)
try:
while True:
task_name, experiment_str, config_kwargs, seed = input_queue.get(timeout=1)
args.experiment = experiment_str
args.config_kwargs = json.dumps(
{
"task_name": task_name,
"GPU_ID": gpu_id,
"TEST_SEED_OFFSET": test_seed_offset,
**config_kwargs,
}
)
args.disable_tensorboard = True
args.disable_config_saving = True
cfg: BaseExperimentConfig = load_config(args)[0] # type: ignore
# assert agent_view_size % 2 == 1
optimal_ave_ep_length = cfg.task_info().get("optimal_ave_ep_length")
log_info(
f"Running training with (env, exp, config_kwargs, seed) ="
f" ({task_name}, {experiment_str}, {config_kwargs}, {seed})"
)
runner = OnPolicyRunner(
config=cfg,
output_dir=args.output_dir,
loaded_config_src_files=None,
seed=args.seed,
mode="train",
mp_ctx=mp,
disable_tensorboard=args.disable_tensorboard,
disable_config_saving=args.disable_config_saving,
)
train_start_time_str = runner.start_train(
max_sampler_processes_per_worker=1,
save_ckpt_after_every_pipeline_stage=False,
)
ckpt_dir = runner.checkpoint_dir(
start_time_str=train_start_time_str, create_if_none=False
)
log_info(
f"Running testing with (env, exp, config_kwargs, seed) ="
f" ({task_name}, {experiment_str}, {config_kwargs}, {seed})"
)
runner.mode = "test"
test_results = runner.start_test(
checkpoint_path_dir_or_pattern=ckpt_dir,
max_sampler_processes_per_worker=1,
)
runner.close()
log_info(
f"Testing complete for (minigrid, exp, config_kwargs, seed) ="
f" ({task_name}, {experiment_str}, {config_kwargs}, {seed})"
)
output_data = {
"exp_type": experiment_str,
"env": task_name,
"config_kwargs_str": canonicaljson.encode_canonical_json(
config_kwargs
).decode("utf-8"),
"reward": [float(tr["reward"]) for tr in test_results],
"avg_ep_length": [float(tr["ep_length"]) for tr in test_results],
"train_steps": [float(tr["training_steps"]) for tr in test_results],
"seed": seed,
"lr": cfg.exp_params.LR,
"extra_tag": cfg.extra_tag(),
}
if optimal_ave_ep_length is not None:
output_data.update(
{
"reached_near_optimal": [
1 * (tr["ep_length"] < optimal_ave_ep_length * 1.1)
for tr in test_results
],
"optimal_avg_ep_length": optimal_ave_ep_length,
}
)
for k in test_results[0]:
if any(
metric_str in k
for metric_str in [
"success",
"found_goal",
"max_comb_correct",
"chose_",
"opened_",
]
):
output_data[k] = [float(tr.get(k, np.nan)) for tr in test_results]
output_queue.put((seed, output_data,))
except queue.Empty:
log_info("Queue empty for worker {}, exiting.".format(process_id))
if __name__ == "__main__":
"""Sample (equally) over hyperparams for each baseline. Aggregate
information in a tsv. This leads to a TSV with `nsamples` times
`number_of_baselines` If offpolicy baselines are to be run,
`demos_exist_for_env` should be able to find demos for the environment
being hp searched for.
Run this with the following command.
Command:
```
python projects/advisor/minigrid_and_pd_scripts/random_hp_search.py \
RUN \
-m 1 \
-b projects/advisor/minigrid_and_pd_experiments \
--output_dir hp_runs \
--log_level error \
--env_name CrossingS25N10
```
"""
parser = get_argument_parser()
parser.add_argument(
"--env_name", type=str, required=True,
)
args = parser.parse_args()
nsamples = 50
if torch.cuda.is_available():
gpu_memory = torch.cuda.get_device_properties(0).total_memory
max_processes_for_gpus = torch.cuda.device_count() * math.floor(
gpu_memory / (1300 * (2 ** 20))
)
else:
max_processes_for_gpus = 0
nprocesses = (
min(max_processes_for_gpus, math.floor(0.9 * mp.cpu_count()))
if torch.cuda.is_available()
else 4
)
gpu_ids = (
[] if not torch.cuda.is_available() else list(range(torch.cuda.device_count()))
)
lr_samples, tf_ratios, fixed_alphas = generate_random_lrs_tf_ratio_and_alphas(
nsamples=nsamples
)
lr_cfg_kwargs = [{"LR": lr} for lr in lr_samples]
tf_ratio_cfg_kwargs = [{"TF_RATIO": ratio} for ratio in tf_ratios]
fixed_alpha_cfg_kwargs = [
{"FIXED_ALPHA": fixed_alpha} for fixed_alpha in fixed_alphas
]
lr_tf_ratio_cfg_kwargs = [
{**a, **b} for a, b in zip(lr_cfg_kwargs, tf_ratio_cfg_kwargs)
]
fixed_advisor_cfg_kwargs = [
{**a, **b} for a, b in zip(lr_cfg_kwargs, fixed_alpha_cfg_kwargs)
]
dagger_fixed_advisor_cfg_kwargs = [
{**a, **b, **c}
for a, b, c in zip(lr_cfg_kwargs, tf_ratio_cfg_kwargs, fixed_alpha_cfg_kwargs)
]
experiment_types_and_cfg_kwargs: Dict[str, List[Dict[str, Any]]] = {
"bc_teacher_forcing_then_ppo": lr_tf_ratio_cfg_kwargs,
"bc_teacher_forcing_then_advisor_fixed_alpha_different_head_weights": dagger_fixed_advisor_cfg_kwargs,
"bc_teacher_forcing_then_advisor": dagger_fixed_advisor_cfg_kwargs,
"bc": lr_cfg_kwargs,
"dagger": lr_tf_ratio_cfg_kwargs,
"ppo": lr_cfg_kwargs,
"advisor_fixed_alpha_different_heads": fixed_advisor_cfg_kwargs,
"advisor": fixed_advisor_cfg_kwargs,
"bc_teacher_forcing": lr_cfg_kwargs,
"dagger_then_advisor_fixed_alpha_different_head_weights": dagger_fixed_advisor_cfg_kwargs,
"dagger_then_advisor": dagger_fixed_advisor_cfg_kwargs,
"dagger_then_ppo": lr_tf_ratio_cfg_kwargs,
"bc_then_ppo": lr_tf_ratio_cfg_kwargs,
"bc_with_ppo": lr_cfg_kwargs,
"gail": lr_cfg_kwargs,
}
if demos_exist_for_env(args.env_name):
experiment_types_and_cfg_kwargs.update(
{
"ppo_with_offpolicy_advisor_fixed_alpha_different_heads": fixed_advisor_cfg_kwargs,
"ppo_with_offpolicy_advisor": fixed_advisor_cfg_kwargs,
"ppo_with_offpolicy": lr_cfg_kwargs,
"pure_offpolicy": lr_cfg_kwargs,
}
)
else:
warnings.warn(
"No demos found for {}, will not run off policy methods.".format(
args.env_name
)
)
assert args.env_name in ("PoisonedDoors",) + MINIGRID_ENV_NAMES_SUPPORTED
# Currently, saving data for one task at a time
task_names = [args.env_name]
exp_type_and_cfg_kwargs_list = []
for exp_type, cfg_kwargs_variants in experiment_types_and_cfg_kwargs.items():
if len(cfg_kwargs_variants) == 0:
cfg_kwargs_variants = [None]
for seed, cfg_kwargs in enumerate(cfg_kwargs_variants):
exp_type_and_cfg_kwargs_list.append((exp_type, cfg_kwargs, seed))
ptitle("Master ({})".format(" and ".join(task_names)))
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
assert len(task_names) == 1
matrix_save_data_path = os.path.join(
output_dir, "random_hp_search_runs_{}.tsv".format(task_names[0]),
)
if os.path.exists(matrix_save_data_path):
df = pd.read_csv(matrix_save_data_path, sep="\t")
df = df.where(pd.notnull(df), None)
df["config_kwargs_str"] = df["config_kwargs_str"].astype(str)
else:
df = pd.DataFrame(
dict(
env=[],
exp_type=[],
config_kwargs_str=[],
success=[],
reached_near_optimal=[],
avg_ep_length=[],
train_steps=[],
found_goal=[],
max_comb_correct=[],
seed=[],
extra_tag=[],
lr=[],
)
)
seen_tuples = set(
zip(df["env"], df["exp_type"], df["config_kwargs_str"], df["seed"])
)
all_tuples_to_train_set = set()
input_queue = mp.Queue()
input_queue_names = []
total_runs = 0
for env, (exp_type, cfg_kwargs, seed) in list(
itertools.product(task_names, exp_type_and_cfg_kwargs_list)
):
total_runs += 1
t = (env, exp_type, cfg_kwargs, seed)
# df loads cfg_kwargs as a string
t_for_matching = (
env,
exp_type,
canonicaljson.encode_canonical_json(cfg_kwargs).decode("utf-8"),
seed,
)
all_tuples_to_train_set.add(t_for_matching)
if t_for_matching not in seen_tuples:
input_queue.put(t)
input_queue_names.append(str((t[1], t[3])))
seen_tuples = seen_tuples & all_tuples_to_train_set
print("Queue:" + "\n".join(input_queue_names))
output_queue = mp.Queue()
print(
"{} (of {}) experiments already completed! Running the rest.".format(
len(seen_tuples), total_runs
)
)
processes = []
nprocesses = min(nprocesses, total_runs - len(seen_tuples))
print(f"Starting {args.env_name} HP Search with {nprocesses} processes.")
for i in range(nprocesses):
processes.append(
mp.Process(
target=iteratively_run_experiments,
kwargs=dict(
process_id=i,
gpu_id=gpu_ids[i % len(gpu_ids)] if len(gpu_ids) != 0 else None,
args=args,
input_queue=input_queue,
output_queue=output_queue,
log_level=args.log_level,
),
)
)
processes[-1].start()
time.sleep(0.1)
while len(seen_tuples) != total_runs:
try:
output_seed, run_data = output_queue.get(timeout=120)
except queue.Empty as _:
print(
f"{120} seconds passed without any experiment completing, continuing wait..."
)
continue
seen_tuple = (
run_data["env"],
run_data["exp_type"],
run_data["config_kwargs_str"],
output_seed,
)
seen_tuples.add(seen_tuple)
        # `DataFrame.append` was removed in pandas 2.0; `concat` is the
        # equivalent replacement for appending a single row.
        df = pd.concat([df, pd.DataFrame([run_data])], ignore_index=True)
df.to_csv(matrix_save_data_path, sep="\t", index=False)
print(f"Run {seen_tuple} saved, {len(seen_tuples)}/{total_runs} complete.")
for p in processes:
try:
p.join(1)
except Exception as _:
pass
print("Saving HP data is done!")
| advisor-main | minigrid_and_pd_scripts/random_hp_search.py |
from allenact_plugins.babyai_plugin.scripts.truncate_expert_demos import (
make_small_demos,
)
from projects.advisor.minigrid_constants import MINIGRID_EXPERT_TRAJECTORIES_DIR
if __name__ == "__main__":
make_small_demos(MINIGRID_EXPERT_TRAJECTORIES_DIR)
| advisor-main | minigrid_and_pd_scripts/make_small_demos.py |
import abc
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from advisor_losses import AdvisorWeightedStage
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
class ObjectNavMixInADVISORConfig(ObjectNavBaseConfig):
@property
@abc.abstractmethod
def alpha(self):
raise NotImplementedError
@property
def beta(self):
return 0
def training_pipeline(self, **kwargs):
training_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"advisor_loss": AdvisorWeightedStage(
rl_loss=PPO(**PPOConfig),
fixed_alpha=self.alpha,
fixed_bound=self.beta,
)
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["advisor_loss"], max_stage_steps=training_steps
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=training_steps)}
),
)
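# Concrete experiment configs subclass this mixin and pin the ADVISOR
# hyperparameters, e.g. (illustrative sketch):
#
#   class MyObjectNavAdvisorConfig(ObjectNavMixInADVISORConfig):
#       @property
#       def alpha(self):
#           return 8.0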
| advisor-main | objectnav_experiments/objectnav_mixin_advisor.py |
advisor-main | objectnav_experiments/__init__.py |
|
from abc import ABC
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
class ObjectNavMixInBCWithPPOConfig(ObjectNavBaseConfig, ABC):
def training_pipeline(self, **kwargs):
training_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"imitation_loss": Imitation(), "ppo_loss": PPO(**PPOConfig)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss", "ppo_loss"],
max_stage_steps=training_steps,
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=training_steps)}
),
)
| advisor-main | objectnav_experiments/objectnav_mixin_bcwithppo.py |
from abc import ABC
from typing import Sequence, Union
import gym
import torch.nn as nn
from torchvision import models
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import Builder
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavMixInResNetGRUWithAuxHeadConfig(ObjectNavBaseConfig, ABC):
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
preprocessors = []
rgb_sensor = next((s for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
if rgb_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=cls.SCREEN_SIZE,
input_width=cls.SCREEN_SIZE,
output_width=7,
output_height=7,
output_dims=512,
pool=False,
torchvision_resnet_model=models.resnet18,
input_uuids=[rgb_sensor.uuid],
output_uuid="rgb_resnet",
)
)
depth_sensor = next(
(s for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
if depth_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
input_width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
output_width=7,
output_height=7,
output_dims=512,
pool=False,
torchvision_resnet_model=models.resnet18,
input_uuids=[depth_sensor.uuid],
output_uuid="depth_resnet",
)
)
return preprocessors
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
return ResnetTensorObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_resnet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_resnet" if has_depth else None,
hidden_size=512,
goal_dims=32,
include_auxiliary_head=True,
)
| advisor-main | objectnav_experiments/objectnav_mixin_resnetgru_with_aux_head.py |
advisor-main | objectnav_experiments/robothor/__init__.py |
|
from typing import Optional
from allenact.base_abstractions.sensor import ExpertActionSensor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from objectnav_experiments.objectnav_mixin_advisor import ObjectNavMixInADVISORConfig
from objectnav_experiments.objectnav_mixin_resnetgru_with_aux_head import (
ObjectNavMixInResNetGRUWithAuxHeadConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBAdvisor(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInADVISORConfig,
ObjectNavMixInResNetGRUWithAuxHeadConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()),),
]
@property
def beta(self):
if self.FIXED_BETA is None:
return super(ObjectNavRoboThorRGBAdvisor, self).beta
else:
return self.FIXED_BETA
def __init__(
self,
FIXED_ALPHA: Optional[float] = None,
FIXED_BETA: Optional[float] = None,
**kwargs,
):
super().__init__(**kwargs)
self.FIXED_ALPHA = FIXED_ALPHA
self.FIXED_BETA = FIXED_BETA
@property
def alpha(self):
if self.FIXED_ALPHA is None:
            raise RuntimeError(
                "`FIXED_ALPHA` is `None`; this is fine when merely constructing"
                " the config but should not occur if you wish to use this"
                " alpha value (e.g. during training)."
            )
return self.FIXED_ALPHA
def tag(self):
return f"Objectnav-RoboTHOR-RGB-ResNetGRU-ADVISOR_{self.alpha}_{self.beta}"
| advisor-main | objectnav_experiments/robothor/objectnav_robothor_rgb_resnetgru_advisor.py |
from allenact.base_abstractions.sensor import ExpertActionSensor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from objectnav_experiments.objectnav_mixin_bcwithppo import (
ObjectNavMixInBCWithPPOConfig,
)
from objectnav_experiments.objectnav_mixin_resnetgru_with_aux_head import (
ObjectNavMixInResNetGRUWithAuxHeadConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBBCWithPPO(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInBCWithPPOConfig,
ObjectNavMixInResNetGRUWithAuxHeadConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()),),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ResNetGRU-BCWithPPO"
| advisor-main | objectnav_experiments/robothor/objectnav_robothor_rgb_resnetgru_bcwithppo.py |
from setuptools import setup, find_packages
setup(name='comet',
version='1.0',
description='Codebase for releasing comet model code',
# url='http://github.com/storborg/funniest',
author='Antoine Bosselut',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=[
"ftfy",
"tqdm",
"pandas"
],
zip_safe=False)
| comet-public-master | setup.py |
comet-public-master | comet/__init__.py |
|
import json
import copy
import torch
import numpy as np
import contextlib
from distutils.dir_util import mkpath
from tqdm import tqdm
def make_new_tensor_from_list(items, device_num, dtype=torch.float32):
if device_num is not None:
device = torch.device("cuda:{}".format(device_num))
else:
device = torch.device("cpu")
return torch.tensor(items, dtype=dtype, device=device)
# is_dir looks at whether the name we make
# should be a directory or a filename
def make_name(opt, prefix="", eval_=False, is_dir=True, set_epoch=None,
do_epoch=True):
string = prefix
string += "{}-{}".format(opt.dataset, opt.exp)
string += "/"
string += "{}-{}-{}".format(opt.trainer, opt.cycle, opt.iters)
string += "/"
string += opt.model
if opt.mle:
string += "-{}".format(opt.mle)
string += "/"
string += make_name_string(opt.data) + "/"
string += make_name_string(opt.net) + "/"
string += make_name_string(opt.train.static) + "/"
if eval_:
string += make_name_string(opt.eval) + "/"
# mkpath caches whether a directory has been created
# In IPython, this can be a problem if the kernel is
# not reset after a dir is deleted. Trying to recreate
# that dir will be a problem because mkpath will think
# the directory already exists
if not is_dir:
mkpath(string)
string += make_name_string(
opt.train.dynamic, True, do_epoch, set_epoch)
if is_dir:
mkpath(string)
return string
def make_name_string(dict_, final=False, do_epoch=False, set_epoch=None):
if final:
if not do_epoch:
string = "{}_{}_{}".format(
dict_.lr, dict_.optim, dict_.bs)
elif set_epoch is not None:
string = "{}_{}_{}_{}".format(
dict_.lr, dict_.optim, dict_.bs, set_epoch)
else:
string = "{}_{}_{}_{}".format(
dict_.lr, dict_.optim, dict_.bs, dict_.epoch)
return string
string = ""
for k, v in dict_.items():
if type(v) == DD:
continue
if isinstance(v, list):
val = "#".join(is_bool(str(vv)) for vv in v)
else:
val = is_bool(v)
if string:
string += "-"
string += "{}_{}".format(k, val)
return string
def is_bool(v):
if str(v) == "False":
return "F"
elif str(v) == "True":
return "T"
return v
def generate_config_files(type_, key, name="base", eval_mode=False):
with open("config/default.json".format(type_), "r") as f:
base_config = json.load(f)
with open("config/{}/default.json".format(type_), "r") as f:
base_config_2 = json.load(f)
if eval_mode:
with open("config/{}/eval_changes.json".format(type_), "r") as f:
changes_by_machine = json.load(f)
else:
with open("config/{}/changes.json".format(type_), "r") as f:
changes_by_machine = json.load(f)
base_config.update(base_config_2)
if name in changes_by_machine:
changes = changes_by_machine[name]
else:
changes = changes_by_machine["base"]
# for param in changes[key]:
# base_config[param] = changes[key][param]
replace_params(base_config, changes[key])
mkpath("config/{}".format(type_))
with open("config/{}/config_{}.json".format(type_, key), "w") as f:
json.dump(base_config, f, indent=4)
def replace_params(base_config, changes):
for param, value in changes.items():
if isinstance(value, dict) and param in base_config:
replace_params(base_config[param], changes[param])
else:
base_config[param] = value
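# Example: replace_params({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}}) mutates the
# base config in place to {"a": {"x": 1, "y": 3}}; nested dicts are merged
# key-by-key rather than overwritten wholesale.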
def initialize_progress_bar(data_loader_list):
num_examples = sum([len(tensor) for tensor in
data_loader_list.values()])
return set_progress_bar(num_examples)
def set_progress_bar(num_examples):
bar = tqdm(total=num_examples)
bar.update(0)
return bar
def merge_list_of_dicts(L):
result = {}
for d in L:
result.update(d)
return result
def return_iterator_by_type(data_type):
if isinstance(data_type, dict):
iterator = data_type.items()
else:
iterator = enumerate(data_type)
return iterator
@contextlib.contextmanager
def temp_seed(seed):
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
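# Example: draws inside the block are reproducible, and the caller's global
# NumPy RNG state is restored afterwards.
#   with temp_seed(123):
#       noise = np.random.randn(3)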
def flatten(outer):
return [el for inner in outer for el in inner]
def zipped_flatten(outer):
return [(key, fill, el) for key, fill, inner in outer for el in inner]
def remove_none(l):
return [e for e in l if e is not None]
# Taken from Jobman 0.1
class DD(dict):
def __getattr__(self, attr):
if attr == '__getstate__':
return super(DD, self).__getstate__
elif attr == '__setstate__':
return super(DD, self).__setstate__
elif attr == '__slots__':
return super(DD, self).__slots__
return self[attr]
def __setattr__(self, attr, value):
# Safety check to ensure consistent behavior with __getattr__.
assert attr not in ('__getstate__', '__setstate__', '__slots__')
# if attr.startswith('__'):
# return super(DD, self).__setattr__(attr, value)
self[attr] = value
def __str__(self):
return 'DD%s' % dict(self)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
z = DD()
for k, kv in self.items():
z[k] = copy.deepcopy(kv, memo)
return z
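# Example: d = DD(lr=1e-4); then d.lr == d["lr"], i.e. attribute access and
# key access are interchangeable (and deepcopy preserves the DD type).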
def convert_nested_dict_to_DD(obj):
if type(obj) == dict:
new_obj = DD(obj)
for k, v in obj.items():
new_DD = convert_nested_dict_to_DD(v)
new_obj[k] = new_DD
return new_obj
else:
return obj
def convert_DD_to_nested_dict(obj):
if type(obj) == DD:
x = {}
for k, v in obj.items():
            x[k] = convert_DD_to_nested_dict(v)
return x
else:
return obj
| comet-public-master | comet/utils.py |
from comet.models.gpt import (LMModel, DEFAULT_CONFIG, load_openai_pretrained_model)
import torch.nn as nn
def make_model(opt, n_vocab, n_ctx, n_special, load=True,
return_acts=True, return_probs=False,
clf_token="<CLASS>", answer_size=None):
    print("Building model with n_ctx = {}".format(n_ctx))
if opt.exp == "generation":
model = LMModel(
opt.net, n_vocab, n_ctx, return_acts=return_acts,
return_probs=return_probs)
elif opt.exp == "classification":
model = ClfModel(
opt.net, n_vocab, n_ctx, clf_token, answer_size)
if load:
print("LOADING PRETRAINED TRANSFORMER")
load_openai_pretrained_model(
model.transformer, n_ctx=n_ctx, n_special=n_special)
return model
def multi_gpu(model, devices):
return nn.DataParallel(model, device_ids=devices)
def load_state_dict(model, state_dict):
try:
model.load_state_dict(state_dict)
except RuntimeError:
new_state_dict = {i[len("module."):]: j for i, j in state_dict.items()}
model.load_state_dict(new_state_dict)
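
# Why the fallback above exists (illustrative sketch): nn.DataParallel
# registers the wrapped model under a "module" attribute, so checkpoints
# saved from a DataParallel-wrapped model prefix every key with "module.".
#
#     layer = nn.Linear(2, 2)
#     wrapped = nn.DataParallel(layer)
#     print(next(iter(wrapped.state_dict())))  # -> "module.weight"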
| comet-public-master | comet/models/models.py |
comet-public-master | comet/models/__init__.py |
|
import torch
def prepare_position_embeddings(opt, encoder_vocab, sequences):
vocab_size = len(encoder_vocab)
num_positions = sequences.size(-2)
position_embeddings = torch.LongTensor(
range(vocab_size, vocab_size + num_positions)).to(sequences.device)
sequences = sequences.repeat(1, 1, 2)
sequences[:, :, 1] = position_embeddings
return sequences
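
# Shape sketch (illustrative, using a hypothetical two-token vocab): a
# (batch, seq_len, 1) tensor of token ids becomes (batch, seq_len, 2),
# pairing each token id with a position id offset past the vocabulary, so a
# single embedding matrix can hold token and position embeddings together.
#
#     vocab = {"a": 0, "b": 1}
#     seqs = torch.zeros(1, 3, 1, dtype=torch.long)
#     out = prepare_position_embeddings(None, vocab, seqs)  # opt is unused
#     # out[0, :, 1] is tensor([2, 3, 4]): positions start at len(vocab)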
| comet-public-master | comet/models/utils.py |
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
'''
Much of this code is taken from HuggingFace's OpenAI LM Implementation here:
https://github.com/huggingface/pytorch-openai-transformer-lm
'''
def gelu(x):
return (0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *
(x + 0.044715 * torch.pow(x, 3)))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
    'relu': nn.ReLU(),  # instantiated so every entry is callable on tensors
    'swish': swish,
    'gelu': gelu
}
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style \
(epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
assert n_state % cfg.nH == 0
self.register_buffer('b', torch.tril(torch.ones(
n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.nH
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.adpt)
self.resid_dropout = nn.Dropout(cfg.rdpt)
# dimensions of w: (batch_size x num_heads x seq_length x seq_length)
def _attn(self, q, k, v, sequence_mask):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
b_subset = self.b[:, :, :w.size(-2), :w.size(-1)]
if sequence_mask is not None:
b_subset = b_subset * sequence_mask.view(
sequence_mask.size(0), 1, -1)
b_subset = b_subset.permute(1, 0, 2, 3)
w = w * b_subset + -1e9 * (1 - b_subset)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, sequence_mask):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value, sequence_mask)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.hSize
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.rdpt)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.hSize
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x, sequence_mask):
a = self.attn(x, sequence_mask)
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
return h
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, vocab=40990, n_ctx=512):
super(TransformerModel, self).__init__()
self.vocab = vocab
self.embed = nn.Embedding(vocab, cfg.hSize)
self.drop = nn.Dropout(cfg.edpt)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block)
for _ in range(cfg.nL)])
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x, sequence_mask):
x = x.view(-1, x.size(-2), x.size(-1))
e = self.embed(x)
# Add the position information to the input embeddings
h = e.sum(dim=2)
for block in self.h:
h = block(h, sequence_mask)
return h
class LMModel(nn.Module):
""" Transformer with language model head only """
def __init__(self, cfg, vocab=40990, n_ctx=512,
return_probs=False, return_acts=False):
super(LMModel, self).__init__()
self.transformer = TransformerModel(cfg, vocab=vocab, n_ctx=n_ctx)
self.lm_head = LMHead(self.transformer, cfg, trunc_and_reshape=False)
self.return_probs = return_probs
self.return_acts = return_acts
if self.return_probs or self.return_acts:
pos_emb_mask = torch.zeros(1, 1, vocab)
pos_emb_mask[:, :, -n_ctx:] = -1e12
self.register_buffer('pos_emb_mask', pos_emb_mask)
def forward(self, x, sequence_mask=None):
h = self.transformer(x, sequence_mask)
lm_logits = self.lm_head(h)
if self.return_probs:
lm_logits = F.softmax(lm_logits + self.pos_emb_mask, dim=-1)
elif self.return_acts:
lm_logits = lm_logits + self.pos_emb_mask
return lm_logits
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg, trunc_and_reshape=True):
super(LMHead, self).__init__()
self.n_embd = cfg.hSize
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
self.trunc_and_reshape = trunc_and_reshape # XD
def forward(self, h):
# Truncated Language modeling logits (we remove the last token)
h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd) \
if self.trunc_and_reshape else h # XD
lm_logits = self.decoder(h_trunc)
return lm_logits
def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12,
n_embd=768, path='./model/', path_names='./'):
# Load weights from TF model
print("Loading weights...")
    with open(path_names + 'parameters_names.json') as f:
        names = json.load(f)
    with open(path + 'params_shapes.json') as f:
        shapes = json.load(f)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
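
# Name-walking sketch (illustrative): a TF variable name such as
# "model/h0/attn/c_attn/w:0" is stripped to "h0/attn/c_attn/w" and resolved
# attribute-by-attribute against the torch model: "h0" -> getattr(model, "h")[0],
# then .attn, then .c_attn, and finally the parameter "w".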
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
DEFAULT_CONFIG = dotdict({
'n_embd': 768,
'n_head': 12,
'n_layer': 12,
'embd_pdrop': 0.1,
'attn_pdrop': 0.1,
'resid_pdrop': 0.1,
'afn': 'gelu',
'clf_pdrop': 0.1})
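
# Instantiation sketch (illustrative): the Block/Attention/MLP modules above
# read short option names (hSize, nH, nL, ...) rather than the long names in
# DEFAULT_CONFIG, so a config that these modules can consume directly would
# look like this:
#
#     cfg = dotdict({'hSize': 768, 'nH': 12, 'nL': 12, 'afn': 'gelu',
#                    'edpt': 0.1, 'adpt': 0.1, 'rdpt': 0.1})
#     lm = LMModel(cfg, vocab=40990, n_ctx=512, return_acts=True)
#     x = torch.zeros(1, 16, 2, dtype=torch.long)  # (batch, seq, [token, position])
#     logits = lm(x)                               # -> (1, 16, 40990)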
| comet-public-master | comet/models/gpt.py |
import comet.train.batch as batch
import comet.evaluate.evaluate as base_evaluate
import numpy as np
def make_evaluator(opt, *args):
if opt.exp == "generation":
return AtomicGenerationEvaluator(opt, *args)
else:
return AtomicClassificationEvaluator(opt, *args)
class AtomicGenerationEvaluator(base_evaluate.Evaluator):
def __init__(self, opt, model, data_loader):
super(AtomicGenerationEvaluator, self).__init__(
opt, model, data_loader)
self.batch = batch.batch_atomic_generate
def initialize_losses(self):
average_loss = {"total_micro": 0, "total_macro": 0}
nums = {"total_micro": 0, "total_macro": 0}
return average_loss, nums
def compute_final_scores(self, average_loss, nums):
average_loss["total_macro"] /= nums["total_macro"]
average_loss["total_micro"] /= nums["total_micro"]
average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])
return average_loss
def counter(self, nums):
return nums["total_macro"]
def print_result(self, split, epoch_losses):
print("{} Loss: \t {}".format(
split, epoch_losses["total_micro"]))
print("{} Perplexity: \t {}".format(
split, epoch_losses["ppl_micro"]))
| comet-public-master | comet/evaluate/atomic_evaluate.py |
import comet.data.data as data
import comet.data.config as cfg
import comet.evaluate.sampler as sampling
def do_gen_run(opt, generator, l, split="dev", scores=None):
    # Generate sequences for examples in the evaluation set using the
    # current trained model
    if scores is None:
        scores = {}  # avoid sharing a mutable default across calls
if opt.eval.gs == "full":
sequences, avg_scores, indiv_scores = generator.generate(split)
else:
sequences, avg_scores, indiv_scores = generator.generate_some(split)
if avg_scores is not None:
# Record scores from generated sequences
for score_name, score_val in avg_scores.items():
scores.setdefault(score_name, {})
scores[score_name].setdefault(l, [])
scores[score_name][l] += [score_val]
# Save generated sequences
save_sequences(opt, sequences, avg_scores, indiv_scores,
l, split, opt.eval.gs == "full",
generator.data_loader)
def save_sequences(opt, sequences, avg_scores, indiv_scores,
l, split, full, data_loader):
# This seems a bit roundabout since l = opt.train.dynamic in train.py
# But it's in case we start checkpointing outside of epoch boundaries
opt.train.dynamic.epoch = l
if cfg.save:
if full:
names = {"gens": "gens", "scores": "scores",
"indiv": "indiv.scores"}
else:
names = {"gens": "gens.small", "scores": "scores.small",
"indiv": "indiv.scores.small"}
# Save generated sequences
data.save_eval_file(opt, sequences, names["gens"], split)
if avg_scores is not None:
# Save average scores over evaluation set for generated sequences
# Scores computed are the ones the generator was initialized with
data.save_eval_file(opt, avg_scores, names["scores"], split)
if split == "dev":
# Save individual scores
data.save_eval_file(
opt, indiv_scores, names["indiv"], split)
class Generator(object):
def __init__(self, opt, model, data_loader, scorers, reward_function=None):
super(Generator, self).__init__()
self.opt = opt
self.model = model
self.data_loader = data_loader
self.sampler = sampling.make_sampler(
opt.eval.sample, opt, data_loader)
def generate(self, split="dev"):
pass
def generate_batch(self, sequences, split, verbose=False, bs=32):
pass
| comet-public-master | comet/evaluate/generate.py |
import time
import torch
import comet.evaluate.generate as base_generate
import comet.evaluate.sampler as sampling
import comet.utils as utils
import comet.data.config as cfg
def make_generator(opt, *args):
return ConceptNetGenerator(opt, *args)
class ConceptNetGenerator(base_generate.Generator):
def __init__(self, opt, model, data_loader):
self.opt = opt
self.model = model
self.data_loader = data_loader
self.sampler = sampling.make_sampler(
opt.eval.sample, opt, data_loader)
def reset_sequences(self):
return []
def generate(self, split="dev"):
print("Generating Sequences")
# Set evaluation mode
self.model.eval()
# Reset evaluation set for dataset split
self.data_loader.reset_offsets(splits=split, shuffle=False)
        start_time = time.time()
        count = 0

        # Reset generated sequence buffer
        sequences = self.reset_sequences()
# Initialize progress bar
bar = utils.set_progress_bar(
self.data_loader.total_size[split] / 2)
reset = False
with torch.no_grad():
# Cycle through development set
while not reset:
start = len(sequences)
# Generate a single batch
reset = self.generate_batch(sequences, split, bs=1)
end = len(sequences)
if not reset:
bar.update(end - start)
else:
print(end)
count += 1
if cfg.toy and count > 10:
break
if (self.opt.eval.gs != "full" and (count > opt.eval.gs)):
break
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        print("{} generations completed in: {} s".format(
            split, time.time() - start_time))
# Compute scores for sequences (e.g., BLEU, ROUGE)
# Computes scores that the generator is initialized with
# Change define_scorers to add more scorers as possibilities
# avg_scores, indiv_scores = self.compute_sequence_scores(
# sequences, split)
avg_scores, indiv_scores = None, None
return sequences, avg_scores, indiv_scores
def generate_batch(self, sequences, split, verbose=False, bs=1):
# Sample batch from data loader
batch, reset = self.data_loader.sample_batch(
split, bs=bs, cat="positive")
start_idx = self.data_loader.max_e1 + self.data_loader.max_r
max_end_len = self.data_loader.max_e2
context = batch["sequences"][:, :start_idx]
reference = batch["sequences"][:, start_idx:]
init = "".join([self.data_loader.vocab_decoder[i].replace(
'</w>', ' ') for i in context[:, :self.data_loader.max_e1].squeeze().tolist() if i]).strip()
start = self.data_loader.max_e1
end = self.data_loader.max_e1 + self.data_loader.max_r
attr = "".join([self.data_loader.vocab_decoder[i].replace(
'</w>', ' ') for i in context[:, start:end].squeeze(0).tolist() if i]).strip()
# Decode sequence
sampling_result = self.sampler.generate_sequence(
batch, self.model, self.data_loader, start_idx, max_end_len)
sampling_result["key"] = batch["key"]
sampling_result["e1"] = init
sampling_result["r"] = attr
sequences.append(sampling_result)
return reset
| comet-public-master | comet/evaluate/conceptnet_generate.py |
comet-public-master | comet/evaluate/__init__.py |
|
def update_classification_losses(losses, nums, name, bs, loss):
    if not isinstance(loss, float):
        raise TypeError("expected a float loss, got {}".format(type(loss)))
nums[name] += bs
losses[name] += loss * bs
def update_generation_losses(losses, nums, micro, macro, bs, length, loss):
# Update Losses
nums[macro] += bs
if isinstance(length, int):
update_indiv_generation_losses(
losses, nums, micro, macro, bs, length, loss)
else:
update_tensor_generation_losses(
losses, nums, micro, macro, bs, length, loss)
def update_indiv_generation_losses(losses, nums, micro,
macro, bs, length, loss):
nums[micro] += bs * length
batch_loss = loss * bs
losses[micro] += batch_loss
losses[macro] += batch_loss / length
def update_tensor_generation_losses(losses, nums, micro,
macro, bs, length, loss):
nums[micro] += length.sum().item()
losses[micro] += loss.sum().item()
losses[macro] += (loss / length.float()).sum().item()
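
# Worked sketch (illustrative): micro totals weight every token equally,
# macro totals weight every sequence equally. For a batch of bs=2 sequences
# of length 10 whose summed per-sequence token loss is 5.0:
#
#     losses = {"mi": 0.0, "ma": 0.0}; nums = {"mi": 0, "ma": 0}
#     update_generation_losses(losses, nums, "mi", "ma",
#                              bs=2, length=10, loss=5.0)
#     # nums == {"mi": 20, "ma": 2}; losses == {"mi": 10.0, "ma": 1.0}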
| comet-public-master | comet/evaluate/utils.py |
import time
import numpy as np
import comet.train.batch as batch_utils
import comet.utils as utils
import comet.evaluate.evaluate as base_evaluate
def make_evaluator(opt, *args, **kwargs):
return ConceptNetGenerationEvaluator(opt, *args, **kwargs)
class ConceptNetGenerationEvaluator(base_evaluate.Evaluator):
def __init__(self, opt, model, data_loader, track=False):
super(ConceptNetGenerationEvaluator, self).__init__(
opt, model, data_loader)
if track:
self.tracker = {"positive": [], "negative": []}
else:
self.tracker = None
def batch(self, opt, nums, average_loss, batch_variables, eval_mode):
batch_variables["category"] = self.current_category
outputs = batch_utils.batch_conceptnet_generate(
opt, nums, average_loss, batch_variables, eval_mode,
tracking_mode=self.tracker is not None)
if outputs.get("tracking", None) is not None:
self.tracker[self.current_category] += outputs["tracking"]
if outputs["reset"] and batch_variables["category"] == "positive":
outputs["reset"] = False
self.current_category = "negative"
return outputs
def initialize_losses(self):
average_loss = {"total_micro": 0, "total_macro": 0,
"negative_micro": 0, "negative_macro": 0}
nums = {"total_micro": 0, "total_macro": 0,
"negative_micro": 0, "negative_macro": 0}
self.current_category = "positive"
if self.tracker is not None:
self.tracker = {"positive": [], "negative": []}
return average_loss, nums
def compute_final_scores(self, average_loss, nums):
average_loss["total_macro"] /= nums["total_macro"]
average_loss["total_micro"] /= nums["total_micro"]
if nums["negative_micro"]:
average_loss["negative_macro"] /= nums["negative_macro"]
average_loss["negative_micro"] /= nums["negative_micro"]
else:
average_loss["negative_macro"] = 0
average_loss["negative_micro"] = 0
average_loss["macro_diff"] = (average_loss["negative_macro"] -
average_loss["total_macro"])
average_loss["micro_diff"] = (average_loss["negative_micro"] -
average_loss["total_micro"])
average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])
return average_loss
def counter(self, nums):
return nums["total_macro"]
def print_result(self, split, epoch_losses):
print("{} Loss: \t {}".format(
split, epoch_losses["total_micro"]))
print("{} Diff: \t {}".format(
split, epoch_losses["micro_diff"]))
print("{} Perplexity: \t {}".format(
split, epoch_losses["ppl_micro"]))
| comet-public-master | comet/evaluate/conceptnet_evaluate.py |