|
1 |
| -from multiprocessing import Process |
| 1 | +import logging |
2 | 2 | import socket
|
| 3 | +import ssl |
| 4 | +import threading |
3 | 5 | import time
|
4 | 6 | import pytest
|
5 | 7 | from pytest_httpserver import HTTPServer
|
6 | 8 | import requests
|
7 |
| -from app import app |
8 | 9 | from tests.functional.backend_api.app_config import AppConfig
|
| 10 | +from threading import Thread |
| 11 | +import trustme |
| 12 | +import importlib |
| 13 | +from app import app as flask_app |
| 14 | +import app |
9 | 15 |
|
10 | 16 |
|
11 |
| -@pytest.fixture(scope="module") |
12 |
| -def app_port() -> int: |
13 |
| - print("Getting free port") |
14 |
| - return get_free_port() |
@pytest.fixture(scope="session")
def ca():
    """Session-wide certificate authority used to sign the test TLS certificates."""
    certificate_authority = trustme.CA()
    return certificate_authority
15 | 20 |
|
16 | 21 |
|
17 |
| -@pytest.fixture(scope="module") |
18 |
| -def app_url(app_port: int) -> int: |
19 |
| - return f"http://localhost:{app_port}" |
@pytest.fixture(scope="session")
def httpserver_ssl_context(ca):
    """Server-side TLS context for the mock HTTP server, with a cert signed by the test CA."""
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # Issue a cert for "localhost" and install it on the context.
    ca.issue_cert("localhost").configure_cert(server_context)
    return server_context
20 | 28 |
|
21 | 29 |
|
22 |
| -@pytest.fixture(scope="module") |
23 |
| -def mock_httpserver(make_httpserver): |
24 |
| - """ |
25 |
| - This is required as the default pytest httpserver fixture is scoped at the function level |
26 |
| - """ |
27 |
| - print("Starting HTTP Mock Server") |
28 |
| - server = make_httpserver |
29 |
| - yield server |
30 |
| - print("Stopping HTTP Mock Server") |
31 |
| - server.clear() |
@pytest.fixture(scope="session")
def httpclient_ssl_context(ca):
    """Client-side TLS context that trusts the test CA.

    The CA bundle only needs to exist on disk while the context is built,
    so the temp file may be removed afterwards.
    """
    with ca.cert_pem.tempfile() as ca_bundle_path:
        client_context = ssl.create_default_context(cafile=ca_bundle_path)
    return client_context
| 34 | + |
| 35 | + |
@pytest.fixture(scope="session")
def app_port() -> int:
    """Pick a free TCP port for the app under test to listen on."""
    logging.info("Getting free port")
    # NOTE(review): get_free_port is assumed to be defined elsewhere in this
    # module (not visible in this chunk) — confirm.
    free_port = get_free_port()
    return free_port
32 | 40 |
|
33 | 41 |
|
34 |
| -@pytest.fixture(scope="module") |
35 |
| -def app_config(mock_httpserver: HTTPServer) -> AppConfig: |
36 |
| - return AppConfig({"AZURE_OPENAI_ENDPOINT": mock_httpserver.url_for("/")}) |
@pytest.fixture(scope="session")
def app_url(app_port: int) -> str:
    """Base URL of the app under test.

    Fixed return annotation: the function returns a URL string, not an int.
    """
    return f"http://localhost:{app_port}"
37 | 45 |
|
38 | 46 |
|
39 |
| -@pytest.fixture(scope="module", autouse=True) |
@pytest.fixture(scope="session")
def app_config(make_httpserver, ca):
    """Session app configuration pointing every Azure endpoint at the local mock server.

    The CA bundle temp file must outlive the tests, so the config is yielded
    from inside the `tempfile()` context.
    """
    logging.info("Creating APP CONFIG")
    # All mocked services share the one local HTTPS server.
    mock_base_url = f"https://localhost:{make_httpserver.port}"
    with ca.cert_pem.tempfile() as ca_temp_path:
        config = AppConfig(
            {
                "AZURE_OPENAI_ENDPOINT": mock_base_url,
                "AZURE_SEARCH_SERVICE": mock_base_url,
                "AZURE_CONTENT_SAFETY_ENDPOINT": mock_base_url,
                # Make HTTPS clients trust the test CA.
                "SSL_CERT_FILE": ca_temp_path,
                "CURL_CA_BUNDLE": ca_temp_path,
            }
        )
        logging.info(f"Created app config: {config.get_all()}")
        yield config
| 62 | + |
| 63 | + |
@pytest.fixture(scope="session", autouse=True)
def manage_app(app_port: int, app_config: AppConfig):
    """Autouse session fixture: export the config to the environment, start the
    app in a background thread, and clean the environment up afterwards."""
    # Environment variables must be set before the app starts reading them.
    app_config.apply_to_environment()
    start_app(app_port)
    # Tests run here; the daemon app thread dies with the test process.
    yield
    app_config.remove_from_environment()
|
46 | 70 |
|
47 | 71 |
|
48 |
| -def start_app(port: int) -> Process: |
49 |
| - print(f"Starting application on port {port}") |
50 |
| - proc = Process(target=app.run, kwargs={"port": port, "debug": True}) |
51 |
| - proc.start() |
52 |
| - wait_for_app(port) |
53 |
| - print("Application started") |
54 |
| - return proc |
@pytest.fixture(scope="function", autouse=True)
def setup_default_mocking(httpserver: HTTPServer, app_config: AppConfig):
    """Register default mock responses for the Azure endpoints the app calls.

    Runs automatically for every test; after the test body finishes,
    `httpserver.check()` raises if the mock server recorded any errors.
    """
    embeddings_response = {
        "object": "list",
        "data": [
            {
                "object": "embedding",
                "embedding": [0.018990106880664825, -0.0073809814639389515],
                "index": 0,
            }
        ],
        "model": "text-embedding-ada-002",
    }
    httpserver.expect_request(
        f"/openai/deployments/{app_config.get('AZURE_OPENAI_EMBEDDING_MODEL')}/embeddings",
        query_string="api-version=2023-12-01-preview",
        method="POST",
    ).respond_with_json(embeddings_response)

    # Azure Search: index existence probe.
    httpserver.expect_request(
        "/indexes('conversations')",
        query_string="api-version=2023-11-01",
        method="GET",
    ).respond_with_json({})

    content_safety_response = {
        "blocklistsMatch": [],
        "categoriesAnalysis": [],
    }
    httpserver.expect_request(
        "/contentsafety/text:analyze",
        query_string="api-version=2023-10-01",
        method="POST",
    ).respond_with_json(content_safety_response)

    chat_completion_response = {
        "id": "chatcmpl-6v7mkQj980V1yBec6ETrKPRqFjNw9",
        "object": "chat.completion",
        "created": 1679072642,
        "model": "gpt-35-turbo",
        "usage": {
            "prompt_tokens": 58,
            "completion_tokens": 68,
            "total_tokens": 126,
        },
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": "42 is the meaning of life",
                },
                "finish_reason": "stop",
                "index": 0,
            }
        ],
    }
    httpserver.expect_request(
        f"/openai/deployments/{app_config.get('AZURE_OPENAI_MODEL')}/chat/completions",
        query_string="api-version=2023-12-01-preview",
        method="POST",
    ).respond_with_json(chat_completion_response)

    index_docs_response = {
        "value": [
            {"key": "1", "status": True, "errorMessage": None, "statusCode": 201}
        ]
    }
    httpserver.expect_request(
        "/indexes('conversations')/docs/search.index",
        query_string="api-version=2023-11-01",
        method="POST",
    ).respond_with_json(index_docs_response)

    yield

    # Surface any unexpected/malformed requests the mock server saw.
    httpserver.check()
62 | 152 |
|
63 | 153 |
|
64 |
| -def wait_for_app(port: int): |
65 |
| - attempts = 0 |
def start_app(app_port: int) -> Thread:
    """Start the Flask app in a background daemon thread and wait for it to respond.

    Args:
        app_port: TCP port the app should listen on.

    Returns:
        The already-started daemon thread running the app.
    """
    logging.info(f"Starting application on port {app_port}")
    # Reload the module so it re-reads the environment variables that
    # app_config just applied.
    importlib.reload(app)
    # BUGFIX: run the reloaded module's Flask instance (`app.app`). The
    # previously imported `flask_app` alias still points at the pre-reload
    # instance, which would ignore the freshly applied configuration.
    app_thread = threading.Thread(target=lambda: app.app.run(port=app_port))
    app_thread.daemon = True
    app_thread.start()
    wait_for_app(app_port)
    logging.info("Application started")
    return app_thread
| 164 | + |
66 | 165 |
|
| 166 | +def wait_for_app(port: int, initial_check_delay: int = 10): |
| 167 | + attempts = 0 |
| 168 | + time.sleep(initial_check_delay) |
67 | 169 | while attempts < 10:
|
68 | 170 | try:
|
69 | 171 | response = requests.get(f"http://localhost:{port}/api/config")
|
|
0 commit comments