Add reload and req-resp performance tests for ing and vs (#5048)
vepatel committed Feb 9, 2024
1 parent 45ca309 commit 3788e4b
Showing 7 changed files with 311 additions and 14 deletions.
34 changes: 21 additions & 13 deletions perf-tests/README.md
@@ -11,33 +11,41 @@ to find out about various configuration options.

### Prerequisites

-- Minikube.
-- Python3 (in a virtualenv)
+- Any k8s platform of your choice (kind, minikube, GKE, AKS etc.)
+- Python3 and Pytest (in a virtualenv)

-#### Step 1 - Create a Minikube Cluster
-
-```bash
-minikube start
-```
+#### Step 1 - Create a cluster on platform of your choice

#### Step 2 - Run the Performance Tests

-**Note**: if you have the Ingress Controller deployed in the cluster, please uninstall it first, making sure to remove
+**Note**: if you already have the Ingress Controller deployed in the cluster, please uninstall it first, making sure to remove
its namespace and RBAC resources.

Run the tests:

- Use local Python3 installation (advised to use pyenv/virtualenv):

-```bash
+```shell
cd perf-tests
pip install -r ../tests/requirements.txt --no-deps
-pytest -v -s -m ap_perf --count=<INT> --users=<INT> --hatch-rate=<INT> --time=<INT>
```

-The tests will use the Ingress Controller for NGINX with the image built from `debian-image-nap-plus`. See the section
-below to learn how to configure the tests including the image and the type of NGINX -- NGINX or NGINX Plus. Refer the
-[Configuring the Tests](#configuring-the-tests) section for valid arguments.
+For Ingress and VS performance tests:
+
+```shell
+pytest -v -s -m perf --count=<INT> --users=<INT> --hatch-rate=<INT> --time=<INT>
+```
+
+For AppProtect performance tests:
+
+```shell
+pytest -v -s -m ap_perf --count=<INT> --users=<INT> --hatch-rate=<INT> --time=<INT>
+```
+
+The tests can use the Ingress Controller for NGINX with the image built from `debian-image-nap-plus`, `debian-image-plus`
+or `debian-image`.
+See the section below to learn how to configure the tests including the image and the type of NGINX -- NGINX or
+NGINX Plus. Refer to the [Configuring the Tests](#configuring-the-tests) section for valid arguments.
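
As a concrete example, a short smoke run of the Ingress/VS tests might look like this (argument values are illustrative only):

```shell
cd perf-tests
pip install -r ../tests/requirements.txt --no-deps
pytest -v -s -m perf --count=1 --users=10 --hatch-rate=5 --time=60
```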

## Configuring the Tests

2 changes: 1 addition & 1 deletion perf-tests/conftest.py
@@ -132,7 +132,7 @@ def pytest_collection_modifyitems(config, items) -> None:
        for item in items:
            if "skip_for_nginx_plus" in item.keywords:
                item.add_marker(skip_for_nginx_plus)
-    if "-ap" not in config.getoption("--image"):
+    if "-nap" not in config.getoption("--image"):
        appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
        for item in items:
            if "appprotect" in item.keywords:
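
The one-character change above matters: `-ap` is not a substring of `-nap`, so the old check treated even `debian-image-nap-plus` as a non-AP image and unconditionally skipped the AppProtect tests. A quick check against the three images the README names:

```python
# Old vs. new substring check for the images named in the README:
for image in ["debian-image", "debian-image-plus", "debian-image-nap-plus"]:
    print(image, "-ap" in image, "-nap" in image)
# debian-image           False  False
# debian-image-plus      False  False
# debian-image-nap-plus  False  True   <- only "-nap" detects the NAP image
```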
42 changes: 42 additions & 0 deletions perf-tests/suite/common.py
@@ -0,0 +1,42 @@
import re
import subprocess
from datetime import datetime

import requests


def collect_prom_reload_metrics(metric_list, scenario, ip, port) -> None:
    req_url = f"http://{ip}:{port}/metrics"
    resp = requests.get(req_url)
    resp_decoded = resp.content.decode("utf-8")
    reload_metric = ""
    for line in resp_decoded.splitlines():
        if "last_reload_milliseconds{class" in line:
            reload_metric = re.findall(r"\d+", line)[0]
            metric_list.append(
                {
                    f"Reload time ({scenario}) ": f"{reload_metric}ms",
                    "TimeStamp": str(datetime.utcnow()),
                }
            )


def run_perf(url, setup_users, setup_rate, setup_time, resource):
    subprocess.run(
        [
            "locust",
            "-f",
            f"suite/{resource}_request_perf.py",
            "--headless",
            "--host",
            url,
            "--csv",
            f"{resource}_response_times",
            "-u",
            setup_users,  # total no. of users
            "-r",
            setup_rate,  # no. of users hatched per second
            "-t",
            setup_time,  # locust session duration in seconds
        ]
    )
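
To make the scraping in `collect_prom_reload_metrics` concrete: it looks for a gauge whose line contains `last_reload_milliseconds{class` and takes the first run of digits on that line as the reload time. A sketch with a hypothetical exposition line (metric name and value invented):

```python
import re

# Hypothetical line from the controller's /metrics endpoint containing
# "last_reload_milliseconds{class" (name and value are illustrative):
line = 'nginx_ingress_nginx_last_reload_milliseconds{class="nginx"} 207'

# re.findall(r"\d+", line) returns every digit run on the line, so the
# parsing relies on the labels carrying no digits before the value:
print(re.findall(r"\d+", line)[0])  # -> "207"
```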
26 changes: 26 additions & 0 deletions perf-tests/suite/ing_request_perf.py
@@ -0,0 +1,26 @@
import os

import yaml
from locust import HttpUser, task

host = ""


class TestResponse(HttpUser):
    # locust class to be invoked
    def on_start(self):
        # get host from the smoke-ingress yaml before each test
        ing_yaml = os.path.join(os.path.dirname(__file__), "../../tests/data/smoke/standard/smoke-ingress.yaml")
        with open(ing_yaml) as f:
            docs = yaml.safe_load_all(f)
            for dep in docs:
                self.host = dep["spec"]["rules"][0]["host"]
        print("Setup finished")

    @task
    def send_request(self):
        response = self.client.get(url="", headers={"host": self.host}, verify=False)
        print(response.text)

    min_wait = 400
    max_wait = 1400
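
The `on_start` hook only needs `spec.rules[0].host` from the smoke Ingress manifest. A self-contained sketch of that lookup, using a made-up manifest with the same shape:

```python
import yaml

# Minimal stand-in for tests/data/smoke/standard/smoke-ingress.yaml
# (host and name are invented; only the structure matters):
manifest = """
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: smoke-ingress
spec:
  rules:
    - host: smoke.example.com
"""

for dep in yaml.safe_load_all(manifest):
    print(dep["spec"]["rules"][0]["host"])  # -> smoke.example.com
```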
113 changes: 113 additions & 0 deletions perf-tests/suite/test_ingress_perf.py
@@ -0,0 +1,113 @@
import json

import pytest
import requests
from common import collect_prom_reload_metrics, run_perf
from settings import TEST_DATA
from suite.utils.resources_utils import (
    create_example_app,
    create_items_from_yaml,
    create_secret_from_yaml,
    delete_common_app,
    delete_items_from_yaml,
    delete_secret,
    ensure_connection,
    ensure_connection_to_public_endpoint,
    wait_before_test,
    wait_until_all_pods_are_ready,
)
from suite.utils.yaml_utils import get_first_ingress_host_from_yaml

reload = []


class Setup:
    """
    Encapsulate the Smoke Example details.

    Attributes:
        req_url (str): URL of backend1 behind the smoke Ingress
    """

    def __init__(self, req_url):
        self.req_url = req_url


@pytest.fixture(scope="class")
def setup(request, kube_apis, ingress_controller_endpoint, test_namespace) -> Setup:
    print("------------------------- Deploy prerequisites -----------------------------------")
    secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/smoke/smoke-secret.yaml")

    create_example_app(kube_apis, "simple", test_namespace)
    wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
    req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1"
    ensure_connection_to_public_endpoint(
        ingress_controller_endpoint.public_ip,
        ingress_controller_endpoint.port,
        ingress_controller_endpoint.port_ssl,
    )

    def fin():
        print("Clean up simple app")
        delete_common_app(kube_apis, "simple", test_namespace)
        delete_secret(kube_apis.v1, secret_name, test_namespace)
        with open("reload_ing.json", "w+") as f:
            json.dump(reload, f, ensure_ascii=False, indent=4)

    request.addfinalizer(fin)
    return Setup(req_url)


@pytest.fixture
def setup_users(request):
    return request.config.getoption("--users")


@pytest.fixture
def setup_rate(request):
    return request.config.getoption("--hatch-rate")


@pytest.fixture
def setup_time(request):
    return request.config.getoption("--time")


@pytest.mark.perf
@pytest.mark.parametrize(
    "ingress_controller",
    [
        {
            "extra_args": [
                "-enable-prometheus-metrics",
            ]
        }
    ],
    indirect=["ingress_controller"],
)
class TestIngressPerf:
    def test_perf(
        self,
        kube_apis,
        ingress_controller_endpoint,
        test_namespace,
        ingress_controller,
        setup,
        setup_users,
        setup_rate,
        setup_time,
    ):
        create_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace)
        ingress_host = get_first_ingress_host_from_yaml(f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml")
        wait_before_test()
        ensure_connection(setup.req_url, 200, {"host": ingress_host})
        resp = requests.get(setup.req_url, headers={"host": ingress_host}, verify=False)
        assert resp.status_code == 200
        collect_prom_reload_metrics(
            reload,
            "Ingress resource",
            ingress_controller_endpoint.public_ip,
            ingress_controller_endpoint.metrics_port,
        )
        run_perf(setup.req_url, setup_users, setup_rate, setup_time, "ing")
        delete_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace)
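
When the class finishes, the `fin` finalizer above dumps the collected samples, so a run leaves a `reload_ing.json` shaped like the sketch below (values invented; the key mirrors the one built in `collect_prom_reload_metrics`, trailing space included):

```python
import json

# Hypothetical contents of reload_ing.json after one reload sample:
sample = [
    {
        "Reload time (Ingress resource) ": "207ms",
        "TimeStamp": "2024-02-09 12:00:00.000000",
    }
]
print(json.dumps(sample, ensure_ascii=False, indent=4))
```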
80 changes: 80 additions & 0 deletions perf-tests/suite/test_vs_perf.py
@@ -0,0 +1,80 @@
import json

import pytest
import requests
from common import collect_prom_reload_metrics, run_perf
from suite.utils.resources_utils import wait_before_test

reload = []


@pytest.fixture(scope="class")
def collect(request, kube_apis, ingress_controller_endpoint, test_namespace) -> None:
    def fin():
        with open("reload_vs.json", "w+") as f:
            json.dump(reload, f, ensure_ascii=False, indent=4)

    request.addfinalizer(fin)


@pytest.fixture
def setup_users(request):
    return request.config.getoption("--users")


@pytest.fixture
def setup_rate(request):
    return request.config.getoption("--hatch-rate")


@pytest.fixture
def setup_time(request):
    return request.config.getoption("--time")


@pytest.mark.perf
@pytest.mark.parametrize(
    "crd_ingress_controller, virtual_server_setup",
    [
        (
            {
                "type": "complete",
                "extra_args": ["-enable-custom-resources", "-enable-prometheus-metrics"],
            },
            {
                "example": "virtual-server",
                "app_type": "simple",
            },
        )
    ],
    indirect=True,
)
class TestVirtualServerPerf:
    def test_vs_perf(
        self,
        kube_apis,
        ingress_controller_endpoint,
        crd_ingress_controller,
        virtual_server_setup,
        collect,
        setup_rate,
        setup_time,
        setup_users,
    ):
        wait_before_test()
        resp = requests.get(
            virtual_server_setup.backend_1_url,
            headers={"host": virtual_server_setup.vs_host},
        )
        assert resp.status_code == 200
        collect_prom_reload_metrics(
            reload,
            "VS resource",
            ingress_controller_endpoint.public_ip,
            ingress_controller_endpoint.metrics_port,
        )

        run_perf(virtual_server_setup.backend_1_url, setup_users, setup_rate, setup_time, "vs")
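
Besides `reload_vs.json`, `run_perf` passes `--csv vs_response_times` to locust, which writes its request statistics as CSV. Assuming locust's usual `--csv` naming convention, a run should leave artifacts like these in the working directory:

```shell
# Expected files after a VS perf run (exact set may vary by locust version):
ls reload_vs.json \
   vs_response_times_stats.csv \
   vs_response_times_stats_history.csv \
   vs_response_times_failures.csv
```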
28 changes: 28 additions & 0 deletions perf-tests/suite/vs_request_perf.py
@@ -0,0 +1,28 @@
import os

import yaml
from locust import HttpUser, task

host = ""


class TestResponse(HttpUser):
    # locust class to be invoked
    def on_start(self):
        # get host from the virtual-server yaml before each test
        ing_yaml = os.path.join(
            os.path.dirname(__file__), "../../tests/data/virtual-server/standard/virtual-server.yaml"
        )
        with open(ing_yaml) as f:
            docs = yaml.safe_load_all(f)
            for dep in docs:
                self.host = dep["spec"]["host"]
        print("Setup finished")

    @task
    def send_request(self):
        response = self.client.get(url="", headers={"host": self.host}, verify=False)
        print(response.text)

    min_wait = 400
    max_wait = 1400
