1- ---
2- name : Test Azure OpenAI Responses API
1+ name : Validate Azure OpenAI response
32
43on :
5- workflow_dispatch : # Allows manual triggering of the workflow
4+ workflow_dispatch : # run on demand from the Actions tab
65
76jobs :
8- test-responses-api :
7+ run-validation :
98 runs-on : ubuntu-latest
10- environment : responses # Use the 'responses' environment for secrets
9+ environment : responses # 🔑 unlocks the environment-scoped secrets
1110
12- steps :
13- - name : Checkout repository
14- uses : actions/checkout@v4
15-
16- - name : Set up Python
17- uses : actions/setup-python@v5
18- with :
19- python-version : ' 3.11'
20-
21- - name : Install dependencies
22- run : |
23- python -m pip install --upgrade pip
24- pip install -r requirements.txt
25-
26- - name : Test Azure OpenAI Responses API
27- env :
28- AZURE_OPENAI_API_KEY : ${{ secrets.AZURE_OPENAI_API_KEY }}
29- AZURE_OPENAI_V1_API_ENDPOINT : ${{ secrets.AZURE_OPENAI_V1_API_ENDPOINT }}
30- AZURE_OPENAI_API_MODEL : ${{ secrets.AZURE_OPENAI_API_MODEL }}
31- run : |
32- echo "Testing responses-basic-aoai-v1.py script..."
33-
34- # Verify required environment variables are set
35- if [ -z "$AZURE_OPENAI_API_KEY" ] || [ -z "$AZURE_OPENAI_V1_API_ENDPOINT" ] || [ -z "$AZURE_OPENAI_API_MODEL" ]; then
36- echo "❌ Error: Required environment variables are not set"
37- echo "AZURE_OPENAI_API_KEY: ${AZURE_OPENAI_API_KEY:+set}"
38- echo "AZURE_OPENAI_V1_API_ENDPOINT: ${AZURE_OPENAI_V1_API_ENDPOINT:+set}"
39- echo "AZURE_OPENAI_API_MODEL: ${AZURE_OPENAI_API_MODEL:+set}"
40- exit 1
41- fi
42-
43- # Verify jq is available
44- if ! command -v jq > /dev/null; then
45- echo "❌ Error: jq is not available"
46- exit 1
47- fi
48-
49- echo "✅ Environment check passed"
50-
51- # Create test results directory
52- mkdir -p test-results
53-
54- # Get current timestamp
55- timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
56- echo "Test run timestamp: $timestamp"
57-
58- # Run the script and capture output
59- echo "Running Python script..."
60- python responses-basic-aoai-v1.py > output.txt 2>&1
61- exit_code=$?
11+ # Expose the environment secrets as real process env-vars
12+ env :
13+ AZURE_OPENAI_API_KEY : ${{ secrets.AZURE_OPENAI_API_KEY }}
14+ AZURE_OPENAI_V1_API_ENDPOINT : ${{ secrets.AZURE_OPENAI_V1_API_ENDPOINT }}
15+ AZURE_OPENAI_API_MODEL : ${{ secrets.AZURE_OPENAI_API_MODEL }}
6216
63- # Initialize test result variables
64- pass_fail="PASS"
65- error_code=""
66- output_text=""
67-
68- # Check if script executed successfully
69- if [ $exit_code -eq 0 ]; then
70- echo "✅ Script executed successfully"
71-
72- # Check if output was generated and capture it
73- if [ -s output.txt ]; then
74- output_text=$(cat output.txt)
75- echo "✅ Script produced output:"
76- echo "$output_text"
77-
78- # Test whether response.output_text contains a valid string
79- # Valid means: non-empty, no error indicators, and actual content
80- if [ -n "$output_text" ] && ! echo "$output_text" | grep -qi "error\|exception\|traceback\|failed\|none\|null"; then
81- echo "✅ Output contains valid string content"
82- pass_fail="PASS"
83- else
84- echo "❌ Output does not contain valid string content"
85- pass_fail="FAIL"
86- error_code="INVALID_OUTPUT"
87- fi
88- else
89- echo "❌ Script produced no output"
90- pass_fail="FAIL"
91- error_code="NO_OUTPUT"
92- fi
93- else
94- echo "❌ Script failed with exit code: $exit_code"
95- echo "Error output:"
96- cat output.txt
97- output_text=$(cat output.txt)
98- pass_fail="FAIL"
99- error_code="SCRIPT_ERROR_$exit_code"
100- fi
101-
102- # Create test results JSON (using jq for proper JSON formatting)
103- jq -n \
104- --arg timestamp "$timestamp" \
105- --arg output "$output_text" \
106- --arg pass_fail "$pass_fail" \
107- --arg error_code "$error_code" \
108- '{
109- test_last_run_date: $timestamp,
110- output: $output,
111- pass_fail: $pass_fail,
112- error_code: $error_code
113- }' > test-results/test-results.json
114-
115- # Also create a human-readable summary
116- echo "Azure OpenAI Responses API Test Results" > test-results/test-summary.txt
117- echo "========================================" >> test-results/test-summary.txt
118- echo "Test Run Date: $timestamp" >> test-results/test-summary.txt
119- echo "Result: $pass_fail" >> test-results/test-summary.txt
120- echo "Error Code: $error_code" >> test-results/test-summary.txt
121- echo "" >> test-results/test-summary.txt
122- echo "Output:" >> test-results/test-summary.txt
123- echo "$output_text" >> test-results/test-summary.txt
124-
125- # Display final results
126- echo "=== Test Results ==="
127- echo "Timestamp: $timestamp"
128- echo "Pass/Fail: $pass_fail"
129- echo "Error Code: $error_code"
130- echo "Output: $output_text"
131-
132- # Exit with error if test failed
133- if [ "$pass_fail" = "FAIL" ]; then
134- echo "❌ Test failed"
135- exit 1
136- else
137- echo "🎉 Test completed successfully!"
138- fi
139-
140- - name : Upload test results artifact
141- uses : actions/upload-artifact@v4
142- if : always() # Upload artifact even if the test fails
143- with :
144- name : azure-openai-test-results
145- path : test-results/
146- retention-days : 30
17+ steps :
18+ # 1 – check out the repo so the script is available
19+ - uses : actions/checkout@v4
20+
21+ # 2 – set up Python
22+ - uses : actions/setup-python@v5
23+ with :
24+ python-version : "3.11"
25+
26+ # 3 – install the script’s two lightweight deps
27+ - name : Install requirements
28+ run : |
29+ python -m pip install --upgrade pip
30+ pip install openai python-dotenv
31+
32+ # 4 – run the script, grade the result, assemble a report
33+ - name : Execute script and capture outcome
34+ id : test
35+ shell : bash
36+ run : |
37+ set +e # we handle failures ourselves — NOTE(review): nothing re-fails the step on FAIL; end the script with [ "$PASS_FAIL" = "PASS" ] so the job goes red when the test fails
38+ TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
39+
40+ # === run the user script ===
41+ python responses-basic-aoai-v1.py > out.txt 2>&1
42+ EXIT_CODE=$?
43+
44+ # === decide pass / fail ===
45+ if [[ $EXIT_CODE -eq 0 && -s out.txt ]]; then
46+ PASS_FAIL="PASS"
47+ else
48+ PASS_FAIL="FAIL"
49+ fi
50+
51+ # === build JSON report ===
52+ jq -n \
53+ --arg date "$TIMESTAMP" \
54+ --arg output "$(tr -d '\r' < out.txt)" \
55+ --arg pass_fail "$PASS_FAIL" \
56+ --argjson code "$EXIT_CODE" \
57+ '{test_run_date: $date,
58+ output: $output,
59+ pass_fail: $pass_fail,
60+ error_code: $code}' > aoai-test-result.json
61+
62+ # 5 – make the report downloadable from the run summary — NOTE(review): add "if: always()" so the report uploads even if the test step fails
63+ - name : Upload result artifact
64+ uses : actions/upload-artifact@v4
65+ with :
66+ name : aoai-response-test # artifact name shown in the run UI
67+ path : aoai-test-result.json
0 commit comments