lg3394 committed
Commit 739e1bf · verified · 1 Parent(s): 400c563

Update app.py

Files changed (1)
  1. app.py +52 -10
app.py CHANGED
@@ -2,6 +2,11 @@ import gradio as gr
 import openai
 import os
 from anthropic import Anthropic
+from azure.ai.contentsafety import ContentSafetyClient
+from azure.ai.contentsafety.models import TextCategory
+from azure.core.credentials import AzureKeyCredential
+from azure.core.exceptions import HttpResponseError
+from azure.ai.contentsafety.models import AnalyzeTextOptions

 # Load OpenAI and Anthropic API Keys from environment variables
 openai.api_key = os.getenv("openaiapikey")
@@ -12,6 +17,42 @@ client = Anthropic(api_key=anthropic_api_key)

 MODEL_NAME = "claude-3-haiku-20240307"

+# Function for Azure Content Safety analysis
+def analyze_text_azure(user_text):
+    # Retrieve Azure keys from Hugging Face secrets (as environment variables)
+    key = os.getenv("azurekey")
+    endpoint = os.getenv("azureendpoint")
+
+    # Create Content Safety client
+    client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
+
+    # Construct request
+    request = AnalyzeTextOptions(text=user_text)
+
+    # Analyze text
+    try:
+        response = client.analyze_text(request)
+    except HttpResponseError as e:
+        return f"Error occurred with Azure Content Safety: {e}"
+
+    # Extract moderation results
+    hate_result = next((item for item in response.categories_analysis if item.category == TextCategory.HATE), None)
+    self_harm_result = next((item for item in response.categories_analysis if item.category == TextCategory.SELF_HARM), None)
+    sexual_result = next((item for item in response.categories_analysis if item.category == TextCategory.SEXUAL), None)
+    violence_result = next((item for item in response.categories_analysis if item.category == TextCategory.VIOLENCE), None)
+
+    results = []
+    if hate_result:
+        results.append(f"Hate severity: {hate_result.severity}")
+    if self_harm_result:
+        results.append(f"SelfHarm severity: {self_harm_result.severity}")
+    if sexual_result:
+        results.append(f"Sexual severity: {sexual_result.severity}")
+    if violence_result:
+        results.append(f"Violence severity: {violence_result.severity}")
+
+    return "\n".join(results) if results else "No flagged content detected in Azure Content Safety."
+
 def moderate_text(user_text):
     # OpenAI Moderation
     openai_moderation_result = "Error in OpenAI Moderation"
@@ -27,12 +68,11 @@ def moderate_text(user_text):
     except Exception as e:
         openai_moderation_result = f"Error occurred with OpenAI: {e}"

-    # Anthropic Moderation (Using Prompt Template)
+    # Anthropic Moderation
     anthropic_moderation_result = "Error in Anthropic Moderation"
     try:
-        # Create the prompt template for Anthropic's moderation model
         prompt_template = """
-        You are a content moderation expert tasked with categorizing user-generated text based on the following guidelines:
+        You are a content moderation expert tasked with categorizing user-generated text.

         Here is the user-generated text to categorize:
         <user_text>{user_text}</user_text>
@@ -50,23 +90,25 @@ def moderate_text(user_text):
             messages=[{"role": "user", "content": prompt}]
         ).content[0].text

-        # Format the Anthropic moderation result
         anthropic_moderation_result = f"Anthropic's moderation result: {response}"

     except Exception as e:
         anthropic_moderation_result = f"Error occurred with Anthropic: {e}"

-    return openai_moderation_result, anthropic_moderation_result
+    # Azure Content Safety Moderation
+    azure_moderation_result = analyze_text_azure(user_text)
+
+    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result


-# Create the Gradio interface
+# Create the Gradio interface with updated input and output labels
 iface = gr.Interface(
     fn=moderate_text,
-    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
-    outputs=["text", "text"],
+    inputs=gr.Textbox(lines=2, placeholder="Please write your text here..."),
+    outputs=[gr.Textbox(label="OpenAI"), gr.Textbox(label="Anthropic"), gr.Textbox(label="Microsoft Azure")],
     title="Content Moderation Tool",
-    description="Enter some text and get the moderation results from OpenAI and Anthropic."
+    description="Enter some text and get the moderation results from OpenAI, Anthropic, and Azure Content Safety."
 )

 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
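
For a quick local sanity check of this commit, a minimal sketch that calls the updated moderate_text() directly and prints all three results is shown below. It assumes app.py is importable from the working directory, that the openai, anthropic, and azure-ai-contentsafety packages are installed, and that the secrets the Space reads (openaiapikey, azurekey, azureendpoint, plus the Anthropic key loaded elsewhere in app.py) are exported as environment variables; the sample input string is illustrative only.

# Minimal sketch: exercise the updated three-way moderation pipeline locally.
# Assumes app.py is on the import path and the required API keys are set as
# environment variables (openaiapikey, azurekey, azureendpoint, Anthropic key).
from app import moderate_text

if __name__ == "__main__":
    sample_text = "This is a harmless test sentence."  # illustrative input only
    openai_res, anthropic_res, azure_res = moderate_text(sample_text)
    print("OpenAI:", openai_res)
    print("Anthropic:", anthropic_res)
    print("Azure Content Safety:", azure_res)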