Ashgen12 committed on
Commit
f17b516
·
verified ·
1 Parent(s): 0b1188e

genai_handler

Browse files
Files changed (1) hide show
  1. genai_handler.py +212 -211
genai_handler.py CHANGED
@@ -1,212 +1,213 @@
1
- # ai_test_generator/genai_handler.py
2
- import os
3
- import json
4
- import logging
5
- import pandas as pd
6
- from openai import OpenAI, APIError, RateLimitError
7
-
8
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
9
-
10
- # --- Configuration ---
11
- # It's better practice to use environment variables for API keys,
12
- # but using the provided key directly for this specific case.
13
- API_KEY = "ddc-beta-v7bjela50v-lI9ep55oPFJz7N06MjSh2Asj2AVGaubLqIC"
14
- BASE_URL = "https://beta.sree.shop/v1"
15
- MODEL_NAME = "Provider-5/gpt-4o" # Use the specified model
16
-
17
- try:
18
- client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
19
- logging.info(f"OpenAI client initialized for model: {MODEL_NAME}")
20
- except Exception as e:
21
- logging.error(f"Failed to initialize OpenAI client: {e}", exc_info=True)
22
- client = None # Ensure client is None if initialization fails
23
-
24
- # --- Helper Function to Parse AI Response for Test Cases ---
25
- def parse_ai_test_cases(response_content: str) -> list[dict]:
26
- """Attempts to parse the AI response string into a list of test case dictionaries."""
27
- try:
28
- # Try parsing directly as JSON if the AI follows instructions perfectly
29
- parsed_data = json.loads(response_content)
30
- if isinstance(parsed_data, list) and all(isinstance(item, dict) for item in parsed_data):
31
- # Basic validation for expected keys (can be made more robust)
32
- if parsed_data and all(k in parsed_data[0] for k in ['Test Case ID', 'Test Scenario', 'Steps to Execute', 'Expected Result']):
33
- logging.info("Successfully parsed AI response as JSON list of test cases.")
34
- return parsed_data
35
- except json.JSONDecodeError:
36
- logging.warning("AI response is not a direct JSON list. Trying to extract from markdown or other formats.")
37
- # Add more sophisticated parsing here if needed (e.g., regex for markdown tables)
38
- # For now, return empty if direct JSON parsing fails
39
- pass # Fall through to return empty list
40
-
41
- logging.error("Failed to parse AI response into the expected test case format.")
42
- return []
43
-
44
-
45
- # --- Test Case Generation ---
46
- def generate_test_cases(elements_json_str: str, url: str, num_cases: int = 5) -> pd.DataFrame:
47
- """
48
- Generates test cases using GenAI based on extracted UI elements.
49
-
50
- Args:
51
- elements_json_str: A JSON string representing the extracted UI elements.
52
- url: The URL of the website being tested (for context).
53
- num_cases: The desired number of test cases (default 3-5, AI might vary).
54
-
55
- Returns:
56
- A pandas DataFrame containing the generated test cases, or an empty DataFrame on failure.
57
- """
58
- if not client:
59
- logging.error("GenAI client is not available.")
60
- return pd.DataFrame()
61
-
62
- prompt = f"""
63
- Analyze the following UI elements extracted from the website {url}:
64
- ```json
65
- {elements_json_str}
66
- ```
67
-
68
- Based on these elements, generate {num_cases} meaningful test cases covering common user interactions like navigation, form interaction (if any), viewing content, etc. Focus on positive and potentially simple negative scenarios relevant to the visible elements.
69
-
70
- Present the test cases ONLY as a valid JSON list of objects. Each object must have the following keys:
71
- - "Test Case ID": A unique identifier (e.g., TC001, TC002).
72
- - "Test Scenario": A brief description of the test objective.
73
- - "Steps to Execute": Numbered steps describing how to perform the test manually. Mention specific element identifiers (text, id, placeholder) where possible from the JSON above.
74
- - "Expected Result": What should happen after executing the steps.
75
-
76
- Example format for a single test case object:
77
- {{
78
- "Test Case ID": "TC001",
79
- "Test Scenario": "Verify user can navigate to the 'Contact' page",
80
- "Steps to Execute": "1. Go to the homepage.\n2. Click on the link with text 'Contact'.",
81
- "Expected Result": "The contact page should load successfully, displaying contact information or a contact form."
82
- }}
83
-
84
- Ensure the entire output is *only* the JSON list, starting with '[' and ending with ']'. Do not include any introductory text, explanations, or markdown formatting outside the JSON structure itself.
85
- """
86
-
87
- logging.info(f"Generating {num_cases} test cases for {url}...")
88
- try:
89
- response = client.chat.completions.create(
90
- model=MODEL_NAME,
91
- messages=[
92
- {"role": "system", "content": "You are an expert QA engineer generating test cases from UI elements."},
93
- {"role": "user", "content": prompt}
94
- ],
95
- temperature=0.5, # Lower temperature for more predictable structure
96
- max_tokens=1500 # Adjust as needed
97
- )
98
-
99
- response_content = response.choices[0].message.content.strip()
100
- logging.info("Raw AI Response for Test Cases:\n" + response_content) # Log the raw response
101
-
102
- # Parse the response
103
- test_cases_list = parse_ai_test_cases(response_content)
104
-
105
- if not test_cases_list:
106
- logging.error("Failed to parse test cases from AI response.")
107
- # Attempt to return raw response in a placeholder format if parsing fails
108
- return pd.DataFrame([{'Test Case ID': 'PARSE_ERROR', 'Test Scenario': 'Failed to parse AI response', 'Steps to Execute': response_content, 'Expected Result': 'N/A'}])
109
-
110
-
111
- df = pd.DataFrame(test_cases_list)
112
- # Ensure standard columns exist, even if AI missed some
113
- for col in ['Test Case ID', 'Test Scenario', 'Steps to Execute', 'Expected Result']:
114
- if col not in df.columns:
115
- df[col] = 'N/A' # Add missing columns with default value
116
- df = df[['Test Case ID', 'Test Scenario', 'Steps to Execute', 'Expected Result']] # Enforce order
117
-
118
- logging.info(f"Successfully generated and parsed {len(df)} test cases.")
119
- return df
120
-
121
- except RateLimitError as e:
122
- logging.error(f"API Rate Limit Error: {e}")
123
- return pd.DataFrame([{'Test Case ID': 'API_ERROR', 'Test Scenario': 'Rate Limit Reached', 'Steps to Execute': str(e), 'Expected Result': 'N/A'}])
124
- except APIError as e:
125
- logging.error(f"API Error during test case generation: {e}")
126
- return pd.DataFrame([{'Test Case ID': 'API_ERROR', 'Test Scenario': 'API Communication Issue', 'Steps to Execute': str(e), 'Expected Result': 'N/A'}])
127
- except Exception as e:
128
- logging.error(f"Unexpected error during test case generation: {e}", exc_info=True)
129
- return pd.DataFrame([{'Test Case ID': 'ERROR', 'Test Scenario': 'Unexpected Error', 'Steps to Execute': str(e), 'Expected Result': 'N/A'}])
130
-
131
-
132
- # --- Selenium Script Generation ---
133
- def generate_selenium_script(test_case: dict, elements_json_str: str, url: str) -> str:
134
- """
135
- Generates a Python Selenium script for a single test case using GenAI.
136
-
137
- Args:
138
- test_case: A dictionary representing a single test case (needs 'Test Case ID', 'Steps to Execute').
139
- elements_json_str: A JSON string of extracted UI elements for context.
140
- url: The target website URL.
141
-
142
- Returns:
143
- A string containing the generated Python Selenium script, or an error message string.
144
- """
145
- if not client:
146
- logging.error("GenAI client is not available.")
147
- return "Error: GenAI client not initialized."
148
-
149
- test_case_id = test_case.get('Test Case ID', 'Unknown TC')
150
- steps = test_case.get('Steps to Execute', 'No steps provided.')
151
- scenario = test_case.get('Test Scenario', 'No scenario provided.')
152
-
153
- prompt = f"""
154
- Generate a complete, runnable Python Selenium script to automate the following test case for the website {url}.
155
-
156
- Test Case ID: {test_case_id}
157
- Test Scenario: {scenario}
158
- Steps to Execute:
159
- {steps}
160
-
161
- Use the following UI element details extracted from the page for context when choosing selectors. Prefer using ID, then Name, then CSS Selector, then Link Text, then XPath. Handle potential waits for elements to be clickable or visible.
162
- ```json
163
- {elements_json_str}
164
- ```
165
-
166
- The script should:
167
- 1. Include necessary imports (Selenium webdriver, By, time, etc.).
168
- 2. Set up the ChromeDriver (using webdriver-manager is preferred). Run in headless mode.
169
- 3. Navigate to the base URL: {url}.
170
- 4. Implement the test steps described above using Selenium commands (find_element, click, send_keys, etc.). Use robust locators based on the provided element details. Include reasonable waits (e.g., `time.sleep(1)` or explicit waits) after actions like clicks or navigation.
171
- 5. Include a basic assertion relevant to the 'Expected Result' or the final step (e.g., check page title, check for an element's presence/text). If the expected result is vague, make a reasonable assertion based on the steps.
172
- 6. Print a success or failure message to the console based on the assertion.
173
- 7. Include teardown code to close the browser (`driver.quit()`) in a `finally` block.
174
- 8. Be fully contained within a single Python code block.
175
-
176
- Output *only* the Python code for the script. Do not include any explanations, introductory text, or markdown formatting like ```python ... ```.
177
- """
178
-
179
- logging.info(f"Generating Selenium script for Test Case ID: {test_case_id}...")
180
- try:
181
- response = client.chat.completions.create(
182
- model=MODEL_NAME,
183
- messages=[
184
- {"role": "system", "content": "You are an expert QA automation engineer generating Python Selenium scripts."},
185
- {"role": "user", "content": prompt}
186
- ],
187
- temperature=0.3, # Low temperature for more deterministic code generation
188
- max_tokens=2000 # Allow ample space for code
189
- )
190
-
191
- script_code = response.choices[0].message.content.strip()
192
-
193
- # Basic cleanup: remove potential markdown fences if AI includes them
194
- if script_code.startswith("```python"):
195
- script_code = script_code[len("```python"):].strip()
196
- if script_code.endswith("```"):
197
- script_code = script_code[:-len("```")].strip()
198
-
199
- logging.info(f"Successfully generated script for {test_case_id}.")
200
- # Log first few lines of the script for verification
201
- logging.debug(f"Generated script (first 100 chars): {script_code[:100]}...")
202
- return script_code
203
-
204
- except RateLimitError as e:
205
- logging.error(f"API Rate Limit Error during script generation for {test_case_id}: {e}")
206
- return f"# Error: API Rate Limit Reached\n# {e}"
207
- except APIError as e:
208
- logging.error(f"API Error during script generation for {test_case_id}: {e}")
209
- return f"# Error: API Communication Issue\n# {e}"
210
- except Exception as e:
211
- logging.error(f"Unexpected error during script generation for {test_case_id}: {e}", exc_info=True)
 
212
  return f"# Error: Unexpected error during script generation\n# {e}"
 
1
# ai_test_generator/genai_handler.py
import os
import json
import logging
import pandas as pd
from openai import OpenAI, APIError, RateLimitError

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Configuration ---
# Read the API key from the environment; a key committed to source control is
# exposed to anyone with repository access.
API_KEY_ENV_VAR = "API_TOKEN"
api_key_value = os.getenv(API_KEY_ENV_VAR)

# SECURITY NOTE(review): the literal key below is kept only as a fallback so
# existing deployments keep working, but it is leaked in version control and
# should be revoked and removed. Prefer setting the API_TOKEN env var.
API_KEY = api_key_value or "ddc-beta-v7bjela50v-lI9ep55oPFJz7N06MjSh2Asj2AVGaubLqIC"
BASE_URL = "https://beta.sree.shop/v1"
MODEL_NAME = "Provider-5/gpt-4o"  # Use the specified model

try:
    client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
    logging.info(f"OpenAI client initialized for model: {MODEL_NAME}")
except Exception as e:
    logging.error(f"Failed to initialize OpenAI client: {e}", exc_info=True)
    client = None  # Ensure client is None if initialization fails; callers check for this.
24
+
25
# --- Helper Function to Parse AI Response for Test Cases ---
def parse_ai_test_cases(response_content: str) -> list[dict]:
    """Parse the AI response string into a list of test case dictionaries.

    Accepts either a bare JSON list or a JSON list wrapped in a markdown
    code fence (``` or ```json), which models frequently emit despite
    instructions to output raw JSON.

    Args:
        response_content: Raw text returned by the model.

    Returns:
        A list of test-case dicts when parsing and key validation succeed,
        otherwise an empty list.
    """
    candidate = response_content.strip()
    # Strip a surrounding markdown code fence if present (e.g. ```json ... ```).
    if candidate.startswith("```"):
        candidate = candidate[3:]
        if candidate.startswith("json"):
            candidate = candidate[4:]
        if candidate.endswith("```"):
            candidate = candidate[:-3]
        candidate = candidate.strip()

    try:
        parsed_data = json.loads(candidate)
        if isinstance(parsed_data, list) and all(isinstance(item, dict) for item in parsed_data):
            # Basic validation for expected keys (checks the first item only;
            # could be extended to validate every item).
            if parsed_data and all(k in parsed_data[0] for k in ['Test Case ID', 'Test Scenario', 'Steps to Execute', 'Expected Result']):
                logging.info("Successfully parsed AI response as JSON list of test cases.")
                return parsed_data
    except json.JSONDecodeError:
        logging.warning("AI response is not a direct JSON list. Trying to extract from markdown or other formats.")
        # Fence stripping above already covered the markdown case; anything
        # else falls through to the error return below.

    logging.error("Failed to parse AI response into the expected test case format.")
    return []
44
+
45
+
46
# --- Test Case Generation ---
def generate_test_cases(elements_json_str: str, url: str, num_cases: int = 5) -> pd.DataFrame:
    """Ask the GenAI model for test cases derived from extracted UI elements.

    Args:
        elements_json_str: JSON string describing the extracted UI elements.
        url: URL of the website under test (prompt context only).
        num_cases: Desired number of test cases (the model may vary).

    Returns:
        A DataFrame of test cases with the four standard columns; an empty
        DataFrame when no client is available; a single-row placeholder
        DataFrame describing the problem on parse or API failure.
    """
    if not client:
        logging.error("GenAI client is not available.")
        return pd.DataFrame()

    expected_cols = ['Test Case ID', 'Test Scenario', 'Steps to Execute', 'Expected Result']

    def _single_row(case_id, scenario, detail):
        # Build a one-row placeholder DataFrame for error reporting.
        return pd.DataFrame([{
            'Test Case ID': case_id,
            'Test Scenario': scenario,
            'Steps to Execute': detail,
            'Expected Result': 'N/A',
        }])

    prompt = f"""
Analyze the following UI elements extracted from the website {url}:
```json
{elements_json_str}
```

Based on these elements, generate {num_cases} meaningful test cases covering common user interactions like navigation, form interaction (if any), viewing content, etc. Focus on positive and potentially simple negative scenarios relevant to the visible elements.

Present the test cases ONLY as a valid JSON list of objects. Each object must have the following keys:
- "Test Case ID": A unique identifier (e.g., TC001, TC002).
- "Test Scenario": A brief description of the test objective.
- "Steps to Execute": Numbered steps describing how to perform the test manually. Mention specific element identifiers (text, id, placeholder) where possible from the JSON above.
- "Expected Result": What should happen after executing the steps.

Example format for a single test case object:
{{
"Test Case ID": "TC001",
"Test Scenario": "Verify user can navigate to the 'Contact' page",
"Steps to Execute": "1. Go to the homepage.\n2. Click on the link with text 'Contact'.",
"Expected Result": "The contact page should load successfully, displaying contact information or a contact form."
}}

Ensure the entire output is *only* the JSON list, starting with '[' and ending with ']'. Do not include any introductory text, explanations, or markdown formatting outside the JSON structure itself.
"""

    logging.info(f"Generating {num_cases} test cases for {url}...")
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": "You are an expert QA engineer generating test cases from UI elements."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,  # Lower temperature for more predictable structure
            max_tokens=1500   # Adjust as needed
        )

        raw_reply = response.choices[0].message.content.strip()
        logging.info("Raw AI Response for Test Cases:\n" + raw_reply)  # Log the raw response

        parsed_cases = parse_ai_test_cases(raw_reply)
        if not parsed_cases:
            logging.error("Failed to parse test cases from AI response.")
            # Surface the raw response in a placeholder row so the user can inspect it.
            return _single_row('PARSE_ERROR', 'Failed to parse AI response', raw_reply)

        frame = pd.DataFrame(parsed_cases)
        # Guarantee the standard columns exist even if the model omitted some,
        # then enforce column order.
        for column in expected_cols:
            if column not in frame.columns:
                frame[column] = 'N/A'
        frame = frame[expected_cols]

        logging.info(f"Successfully generated and parsed {len(frame)} test cases.")
        return frame

    except RateLimitError as e:
        logging.error(f"API Rate Limit Error: {e}")
        return _single_row('API_ERROR', 'Rate Limit Reached', str(e))
    except APIError as e:
        logging.error(f"API Error during test case generation: {e}")
        return _single_row('API_ERROR', 'API Communication Issue', str(e))
    except Exception as e:
        logging.error(f"Unexpected error during test case generation: {e}", exc_info=True)
        return _single_row('ERROR', 'Unexpected Error', str(e))
131
+
132
+
133
# --- Selenium Script Generation ---
def generate_selenium_script(test_case: dict, elements_json_str: str, url: str) -> str:
    """Ask the GenAI model for a Python Selenium script automating one test case.

    Args:
        test_case: Dict for a single test case (uses 'Test Case ID',
            'Test Scenario', and 'Steps to Execute').
        elements_json_str: JSON string of extracted UI elements for context.
        url: The target website URL.

    Returns:
        The generated Python Selenium script as a string, or a commented
        error message string on failure.
    """
    if not client:
        logging.error("GenAI client is not available.")
        return "Error: GenAI client not initialized."

    tc_id = test_case.get('Test Case ID', 'Unknown TC')
    tc_steps = test_case.get('Steps to Execute', 'No steps provided.')
    tc_scenario = test_case.get('Test Scenario', 'No scenario provided.')

    prompt = f"""
Generate a complete, runnable Python Selenium script to automate the following test case for the website {url}.

Test Case ID: {tc_id}
Test Scenario: {tc_scenario}
Steps to Execute:
{tc_steps}

Use the following UI element details extracted from the page for context when choosing selectors. Prefer using ID, then Name, then CSS Selector, then Link Text, then XPath. Handle potential waits for elements to be clickable or visible.
```json
{elements_json_str}
```

The script should:
1. Include necessary imports (Selenium webdriver, By, time, etc.).
2. Set up the ChromeDriver (using webdriver-manager is preferred). Run in headless mode.
3. Navigate to the base URL: {url}.
4. Implement the test steps described above using Selenium commands (find_element, click, send_keys, etc.). Use robust locators based on the provided element details. Include reasonable waits (e.g., `time.sleep(1)` or explicit waits) after actions like clicks or navigation.
5. Include a basic assertion relevant to the 'Expected Result' or the final step (e.g., check page title, check for an element's presence/text). If the expected result is vague, make a reasonable assertion based on the steps.
6. Print a success or failure message to the console based on the assertion.
7. Include teardown code to close the browser (`driver.quit()`) in a `finally` block.
8. Be fully contained within a single Python code block.

Output *only* the Python code for the script. Do not include any explanations, introductory text, or markdown formatting like ```python ... ```.
"""

    logging.info(f"Generating Selenium script for Test Case ID: {tc_id}...")
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": "You are an expert QA automation engineer generating Python Selenium scripts."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,  # Low temperature for more deterministic code generation
            max_tokens=2000   # Allow ample space for code
        )

        code_text = response.choices[0].message.content.strip()

        # Basic cleanup: drop markdown fences the model sometimes adds
        # despite instructions.
        if code_text.startswith("```python"):
            code_text = code_text.removeprefix("```python").strip()
        if code_text.endswith("```"):
            code_text = code_text.removesuffix("```").strip()

        logging.info(f"Successfully generated script for {tc_id}.")
        # Log a short prefix of the script for verification.
        logging.debug(f"Generated script (first 100 chars): {code_text[:100]}...")
        return code_text

    except RateLimitError as e:
        logging.error(f"API Rate Limit Error during script generation for {tc_id}: {e}")
        return f"# Error: API Rate Limit Reached\n# {e}"
    except APIError as e:
        logging.error(f"API Error during script generation for {tc_id}: {e}")
        return f"# Error: API Communication Issue\n# {e}"
    except Exception as e:
        logging.error(f"Unexpected error during script generation for {tc_id}: {e}", exc_info=True)
        return f"# Error: Unexpected error during script generation\n# {e}"