from flask import Flask, request, render_template
from transformers import pipeline

app = Flask(__name__)

# Load a pretrained sentiment-analysis pipeline once at startup
nlp = pipeline('sentiment-analysis')


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        message = request.form['message']
        # Classify the submitted text with the sentiment pipeline
        prediction = nlp(message)
        return render_template('index.html', prediction_text=prediction)


if __name__ == "__main__":
    app.run(debug=True)
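

# Quick way to exercise the /predict route from a separate process
# (a minimal sketch, not part of the original snippet: assumes the app above
# is running on Flask's default http://127.0.0.1:5000 and that the
# `requests` package is installed).
import requests

resp = requests.post(
    'http://127.0.0.1:5000/predict',
    data={'message': 'I really enjoyed this movie!'},
)
print(resp.status_code)  # 200 once index.html renders with prediction_text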


# Text-generation helper using GPT-2
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")


def get_response(prompt):
    # Tokenize the prompt and generate a continuation of up to 150 tokens,
    # disallowing repeated bigrams
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_length=150, num_return_sequences=1, no_repeat_ngram_size=2)
    # Decode only the newly generated tokens, i.e. everything after the prompt
    response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    return response
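

# Example usage (a minimal sketch, not part of the original snippet):
# generate a short continuation for a hypothetical prompt.
prompt = "Machine learning models can be served from a Flask app by"
print(get_response(prompt))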