|
import os

import google.generativeai as genai
from google.cloud.aiplatform_v1beta1.types.content import SafetySetting
from vertexai.preview.generative_models import (
    HarmCategory,
    HarmBlockThreshold,
)
|
|
|
# Read the API key from the environment instead of hard-coding it.
# SECURITY NOTE(review): the original source embedded a live API key as a
# string literal; that key is compromised and must be revoked/rotated.
# Raises KeyError with a clear message if the variable is unset.
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]

genai.configure(api_key=GOOGLE_API_KEY)
gemini_model = genai.GenerativeModel('gemini-pro')

prompt = "Give the list of toxic words in hindi"

# Every harm category whose filter we disable below. BLOCK_NONE turns the
# safety filter off entirely so the model will answer the (intentionally
# sensitive) prompt instead of returning a blocked response.
_UNBLOCKED_CATEGORIES = (
    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
    HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    HarmCategory.HARM_CATEGORY_HARASSMENT,
)

# Build one SafetySetting per category instead of repeating the constructor
# four times; the resulting list is identical to the original hand-written one.
response = gemini_model.generate_content(
    prompt,
    safety_settings=[
        SafetySetting(category=category, threshold=HarmBlockThreshold.BLOCK_NONE)
        for category in _UNBLOCKED_CATEGORIES
    ],
)

print(response)