diff --git a/.github/workflows/registry-rest-api-tests.yml b/.github/workflows/registry-rest-api-tests.yml
index 5eddd68a539..cd679c7044b 100644
--- a/.github/workflows/registry-rest-api-tests.yml
+++ b/.github/workflows/registry-rest-api-tests.yml
@@ -145,8 +145,8 @@ jobs:
       - name: Setup and Run Registry Rest API tests
         run: |
           echo "Running Registry REST API tests..."
-          cd sdk/python/tests/registry_rest_api_tests/
-          pytest test_feast_registry.py -s
+          cd sdk/python
+          pytest tests/integration/registration/rest_api/test_registry_rest_api.py --integration -s
 
       - name: Clean up docker images
         if: always()
diff --git a/sdk/python/tests/registry_rest_api_tests/conftest.py b/sdk/python/tests/integration/registration/rest_api/conftest.py
similarity index 78%
rename from sdk/python/tests/registry_rest_api_tests/conftest.py
rename to sdk/python/tests/integration/registration/rest_api/conftest.py
index 2a128785fd8..36c358a9aa6 100644
--- a/sdk/python/tests/registry_rest_api_tests/conftest.py
+++ b/sdk/python/tests/integration/registration/rest_api/conftest.py
@@ -1,9 +1,11 @@
 import os
+from pathlib import Path
 
 import pytest
 import requests
 from kubernetes import client, config
-from support import (
+
+from tests.integration.registration.rest_api.support import (
     applyFeastProject,
     create_feast_project,
     create_namespace,
@@ -41,6 +43,10 @@ def feast_rest_client():
     config.load_kube_config()
     api_instance = client.CoreV1Api()
 
+    # Get the directory containing this conftest.py file
+    test_dir = Path(__file__).parent
+    resource_dir = test_dir / "resource"
+
     # Constants and environment values
     namespace = "test-ns-feast-rest"
     credit_scoring = "credit-scoring"
@@ -54,23 +60,37 @@ def feast_rest_client():
     try:
         if not run_on_openshift:
             # Deploy dependencies
-            deploy_and_validate_pod(namespace, "resource/redis.yaml", "app=redis")
-            deploy_and_validate_pod(namespace, "resource/postgres.yaml", "app=postgres")
+            deploy_and_validate_pod(
+                namespace, str(resource_dir / "redis.yaml"), "app=redis"
+            )
+            deploy_and_validate_pod(
+                namespace, str(resource_dir / "postgres.yaml"), "app=postgres"
+            )
 
             # Create and validate FeatureStore CRs
             create_feast_project(
-                "resource/feast_config_credit_scoring.yaml", namespace, credit_scoring
+                str(resource_dir / "feast_config_credit_scoring.yaml"),
+                namespace,
+                credit_scoring,
             )
             validate_feature_store_cr_status(namespace, credit_scoring)
 
             create_feast_project(
-                "resource/feast_config_driver_ranking.yaml", namespace, driver_ranking
+                str(resource_dir / "feast_config_driver_ranking.yaml"),
+                namespace,
+                driver_ranking,
             )
             validate_feature_store_cr_status(namespace, driver_ranking)
 
             # Deploy ingress and get route URL
             run_kubectl_command(
-                ["apply", "-f", "resource/feast-registry-nginx.yaml", "-n", namespace]
+                [
+                    "apply",
+                    "-f",
+                    str(resource_dir / "feast-registry-nginx.yaml"),
+                    "-n",
+                    namespace,
+                ]
             )
             ingress_host = run_kubectl_command(
                 [
@@ -114,7 +134,7 @@ def feast_rest_client():
                 aws_secret_key,
                 aws_bucket,
                 registry_path,
-                "resource/feast_config_rhoai.yaml",
+                str(resource_dir / "feast_config_rhoai.yaml"),
                 namespace,
             )
             validate_feature_store_cr_status(namespace, "test-s3")
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/feast-registry-nginx.yaml b/sdk/python/tests/integration/registration/rest_api/resource/feast-registry-nginx.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/feast-registry-nginx.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/feast-registry-nginx.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/feast_config_credit_scoring.yaml b/sdk/python/tests/integration/registration/rest_api/resource/feast_config_credit_scoring.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/feast_config_credit_scoring.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/feast_config_credit_scoring.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/feast_config_driver_ranking.yaml b/sdk/python/tests/integration/registration/rest_api/resource/feast_config_driver_ranking.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/feast_config_driver_ranking.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/feast_config_driver_ranking.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/feast_config_rhoai.yaml b/sdk/python/tests/integration/registration/rest_api/resource/feast_config_rhoai.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/feast_config_rhoai.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/feast_config_rhoai.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/postgres.yaml b/sdk/python/tests/integration/registration/rest_api/resource/postgres.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/postgres.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/postgres.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/resource/redis.yaml b/sdk/python/tests/integration/registration/rest_api/resource/redis.yaml
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/resource/redis.yaml
rename to sdk/python/tests/integration/registration/rest_api/resource/redis.yaml
diff --git a/sdk/python/tests/registry_rest_api_tests/support.py b/sdk/python/tests/integration/registration/rest_api/support.py
similarity index 100%
rename from sdk/python/tests/registry_rest_api_tests/support.py
rename to sdk/python/tests/integration/registration/rest_api/support.py
diff --git a/sdk/python/tests/registry_rest_api_tests/test_feast_registry.py b/sdk/python/tests/integration/registration/rest_api/test_registry_rest_api.py
similarity index 84%
rename from sdk/python/tests/registry_rest_api_tests/test_feast_registry.py
rename to sdk/python/tests/integration/registration/rest_api/test_registry_rest_api.py
index 04e86618cf7..5d37d700a64 100644
--- a/sdk/python/tests/registry_rest_api_tests/test_feast_registry.py
+++ b/sdk/python/tests/integration/registration/rest_api/test_registry_rest_api.py
@@ -13,7 +13,7 @@
 
 # Test Configuration Constants
 @dataclass(frozen=True)
-class TestConfig:
+class RegistryTestConfig:
     """Configuration constants for registry REST API tests."""
 
     CREDIT_SCORING_PROJECT = "credit_scoring_local"
@@ -103,7 +103,7 @@ def validate_entity_structure(entity: Dict[str, Any]) -> None:
         assert "lastUpdatedTimestamp" in meta
 
         assert isinstance(entity["project"], str)
-        assert entity["project"] in TestConfig.PROJECT_NAMES
+        assert entity["project"] in RegistryTestConfig.PROJECT_NAMES
 
     @staticmethod
     def validate_feature_structure(feature: Dict[str, Any]) -> None:
@@ -132,6 +132,7 @@ def validate_batch_source(batch_source: Dict[str, Any]) -> None:
         assert batch_source.get("type") == "BATCH_FILE"
 
+@pytest.mark.integration
 @pytest.mark.skipif(
     not os.path.exists(os.path.expanduser("~/.kube/config")),
     reason="Kube config not available in this environment",
 )
@@ -143,7 +144,7 @@ class TestRegistryServerRest:
     def test_list_entities(self, feast_rest_client):
         """Test listing entities for a specific project."""
         response = feast_rest_client.get(
-            f"/entities/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/entities/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -151,21 +152,21 @@ def test_list_entities(self, feast_rest_client):
         assert "entities" in data
         entities = data["entities"]
         assert isinstance(entities, list)
-        assert len(entities) == TestConfig.CREDIT_SCORING_ENTITIES_COUNT
+        assert len(entities) == RegistryTestConfig.CREDIT_SCORING_ENTITIES_COUNT
 
         # Validate entity names
         actual_entity_names = {entity["spec"]["name"] for entity in entities}
-        assert actual_entity_names == TestConfig.ENTITY_NAMES
+        assert actual_entity_names == RegistryTestConfig.ENTITY_NAMES
 
         # Validate pagination
         APITestHelpers.validate_pagination(
-            data, TestConfig.CREDIT_SCORING_ENTITIES_COUNT
+            data, RegistryTestConfig.CREDIT_SCORING_ENTITIES_COUNT
         )
 
     def test_get_entity(self, feast_rest_client):
         """Test getting a specific entity with detailed validation."""
         response = feast_rest_client.get(
-            f"/entities/zipcode/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/entities/zipcode/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -178,7 +179,7 @@ def test_get_entity(self, feast_rest_client):
             spec["description"]
             == "ZIP code identifier for geographic location-based features"
         )
-        assert spec["tags"] == TestConfig.ZIPCODE_SPEC_TAGS
+        assert spec["tags"] == RegistryTestConfig.ZIPCODE_SPEC_TAGS
 
         # Validate meta
         meta = data["meta"]
@@ -215,22 +216,22 @@ def test_entities_all(self, feast_rest_client):
     def test_list_data_sources(self, feast_rest_client):
         """Test listing data sources for a specific project."""
         response = feast_rest_client.get(
-            f"/data_sources/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/data_sources/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
         assert "dataSources" in data
         data_sources = data["dataSources"]
-        assert len(data_sources) == TestConfig.CREDIT_SCORING_DATA_SOURCES_COUNT
+        assert len(data_sources) == RegistryTestConfig.CREDIT_SCORING_DATA_SOURCES_COUNT
 
         APITestHelpers.validate_pagination(
-            data, TestConfig.CREDIT_SCORING_DATA_SOURCES_COUNT
+            data, RegistryTestConfig.CREDIT_SCORING_DATA_SOURCES_COUNT
         )
 
     def test_get_data_sources(self, feast_rest_client):
         """Test getting a specific data source."""
         response = feast_rest_client.get(
-            f"/data_sources/Zipcode source/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/data_sources/Zipcode source/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -254,7 +255,7 @@ def test_data_sources_all(self, feast_rest_client):
         # Validate project associations for relevant data source types
         for ds in data_sources:
             if ds["type"] in ("BATCH_FILE", "REQUEST_SOURCE"):
-                assert ds["project"] in TestConfig.PROJECT_NAMES
+                assert ds["project"] in RegistryTestConfig.PROJECT_NAMES
 
         pagination = data.get("pagination", {})
         assert pagination.get("page") == 1
@@ -266,12 +267,15 @@ def test_data_sources_all(self, feast_rest_client):
     def test_list_feature_services(self, feast_rest_client):
         """Test listing feature services for a specific project."""
         response = feast_rest_client.get(
-            f"/feature_services/?project={TestConfig.DRIVER_RANKING_PROJECT}"
+            f"/feature_services/?project={RegistryTestConfig.DRIVER_RANKING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
         feature_services = data.get("featureServices", [])
-        assert len(feature_services) == TestConfig.DRIVER_RANKING_FEATURE_SERVICES_COUNT
+        assert (
+            len(feature_services)
+            == RegistryTestConfig.DRIVER_RANKING_FEATURE_SERVICES_COUNT
+        )
 
         # Validate batch sources in features
         for fs in feature_services:
@@ -288,7 +292,7 @@ def test_feature_services_all(self, feast_rest_client):
         assert len(feature_services) >= 1
 
         for fs in feature_services:
-            assert fs.get("project") in TestConfig.PROJECT_NAMES
+            assert fs.get("project") in RegistryTestConfig.PROJECT_NAMES
 
             # Validate features structure
             spec = fs.get("spec", {})
@@ -299,7 +303,7 @@ def test_feature_services_all(self, feast_rest_client):
     def test_get_feature_services(self, feast_rest_client):
         """Test getting a specific feature service."""
         response = feast_rest_client.get(
-            f"/feature_services/driver_activity_v2/?project={TestConfig.DRIVER_RANKING_PROJECT}"
+            f"/feature_services/driver_activity_v2/?project={RegistryTestConfig.DRIVER_RANKING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -313,21 +317,22 @@ def test_get_feature_services(self, feast_rest_client):
     def test_list_feature_views(self, feast_rest_client):
         """Test listing feature views for a specific project."""
         response = feast_rest_client.get(
-            f"/feature_views/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/feature_views/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
         data = APITestHelpers.validate_response_success(response)
         assert (
-            len(data["featureViews"]) == TestConfig.CREDIT_SCORING_FEATURE_VIEWS_COUNT
+            len(data["featureViews"])
+            == RegistryTestConfig.CREDIT_SCORING_FEATURE_VIEWS_COUNT
         )
 
         APITestHelpers.validate_pagination(
-            data, TestConfig.CREDIT_SCORING_FEATURE_VIEWS_COUNT
+            data, RegistryTestConfig.CREDIT_SCORING_FEATURE_VIEWS_COUNT
        )
 
     def test_get_feature_view(self, feast_rest_client):
         """Test getting a specific feature view."""
         response = feast_rest_client.get(
-            f"/feature_views/credit_history/?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/feature_views/credit_history/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -351,26 +356,26 @@ def test_feature_views_all(self, feast_rest_client):
     def test_list_features(self, feast_rest_client):
         """Test listing features for a specific project."""
         response = feast_rest_client.get(
-            f"/features/?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=true"
+            f"/features/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=true"
         )
 
         data = APITestHelpers.validate_response_success(response)
         features = data.get("features")
         assert isinstance(features, list)
-        assert len(features) == TestConfig.CREDIT_SCORING_FEATURES_COUNT
+        assert len(features) == RegistryTestConfig.CREDIT_SCORING_FEATURES_COUNT
 
         # Validate each feature structure
         for feature in features:
             APITestHelpers.validate_feature_structure(feature)
 
         APITestHelpers.validate_pagination(
-            data, TestConfig.CREDIT_SCORING_FEATURES_COUNT
+            data, RegistryTestConfig.CREDIT_SCORING_FEATURES_COUNT
         )
 
     def test_get_feature(self, feast_rest_client):
         """Test getting a specific feature."""
         response = feast_rest_client.get(
-            f"/features/zipcode_features/city/?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
+            f"/features/zipcode_features/city/?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
        )
 
         data = APITestHelpers.validate_response_success(response)
@@ -396,14 +401,17 @@ def test_features_all(self, feast_rest_client):
 
         # Validate expected projects are present
         actual_projects = set(f["project"] for f in features)
-        assert TestConfig.PROJECT_NAMES.issubset(actual_projects)
+        assert RegistryTestConfig.PROJECT_NAMES.issubset(actual_projects)
 
         APITestHelpers.validate_pagination_all_endpoint(data, "features")
 
     # Project Tests
     @pytest.mark.parametrize(
         "project_name",
-        [TestConfig.CREDIT_SCORING_PROJECT, TestConfig.DRIVER_RANKING_PROJECT],
+        [
+            RegistryTestConfig.CREDIT_SCORING_PROJECT,
+            RegistryTestConfig.DRIVER_RANKING_PROJECT,
+        ],
     )
     def test_get_project_by_name(self, feast_rest_client, project_name):
         """Test getting a project by name."""
@@ -420,13 +428,13 @@ def test_get_projects_list(self, feast_rest_client):
         assert len(projects) == 2
 
         actual_project_names = [project["spec"]["name"] for project in projects]
-        assert set(actual_project_names) == TestConfig.PROJECT_NAMES
+        assert set(actual_project_names) == RegistryTestConfig.PROJECT_NAMES
 
     # Lineage Tests
     def test_get_registry_lineage(self, feast_rest_client):
         """Test getting registry lineage for a specific project."""
         response = feast_rest_client.get(
-            f"/lineage/registry?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/lineage/registry?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -448,11 +456,11 @@ def test_get_registry_lineage(self, feast_rest_client):
     def test_get_lineage_complete(self, feast_rest_client):
         """Test getting complete lineage for a specific project."""
         response = feast_rest_client.get(
-            f"/lineage/complete?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/lineage/complete?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
-        assert data.get("project") == TestConfig.CREDIT_SCORING_PROJECT
+        assert data.get("project") == RegistryTestConfig.CREDIT_SCORING_PROJECT
         assert "objects" in data
 
         objects = data["objects"]
@@ -511,12 +519,12 @@ def test_get_registry_complete_all(self, feast_rest_client):
         assert len(data["projects"]) > 0
 
         project_names = [project["project"] for project in data.get("projects", [])]
-        assert TestConfig.CREDIT_SCORING_PROJECT in project_names
+        assert RegistryTestConfig.CREDIT_SCORING_PROJECT in project_names
 
     def test_get_lineage_object_path(self, feast_rest_client):
         """Test getting lineage for a specific object."""
         response = feast_rest_client.get(
-            f"/lineage/objects/entity/dob_ssn?project={TestConfig.CREDIT_SCORING_PROJECT}"
+            f"/lineage/objects/entity/dob_ssn?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -541,7 +549,7 @@ def test_get_lineage_object_path(self, feast_rest_client):
     def test_saved_datasets_endpoints(self, feast_rest_client, endpoint, key):
         """Test saved datasets endpoints with parameterization."""
         if endpoint == "/saved_datasets":
-            url = f"{endpoint}?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
+            url = f"{endpoint}?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
         else:
             url = f"{endpoint}?allow_cache=true&page=1&limit=50&sort_order=asc&include_relationships=false"
 
@@ -555,11 +563,13 @@ def test_saved_datasets_endpoints(self, feast_rest_client, endpoint, key):
         # Extract and validate names
         actual_names = [ds["spec"]["name"] for ds in saved_datasets]
         APITestHelpers.validate_names_match(
-            actual_names, TestConfig.SAVED_DATASET_NAMES
+            actual_names, RegistryTestConfig.SAVED_DATASET_NAMES
         )
 
         # Validate pagination
-        APITestHelpers.validate_pagination(data, TestConfig.SAVED_DATASETS_COUNT)
+        APITestHelpers.validate_pagination(
+            data, RegistryTestConfig.SAVED_DATASETS_COUNT
+        )
         if endpoint == "/saved_datasets/all":
             assert data["pagination"]["page"] == 1
             assert data["pagination"]["limit"] == 50
@@ -568,7 +578,7 @@ def test_get_saved_datasets_by_name(self, feast_rest_client):
         """Test getting a specific saved dataset by name."""
         dataset_name = "comprehensive_credit_dataset_v1"
         response = feast_rest_client.get(
-            f"/saved_datasets/{dataset_name}?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
+            f"/saved_datasets/{dataset_name}?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
         )
 
         data = APITestHelpers.validate_response_success(response)
@@ -580,14 +590,14 @@ def test_get_saved_datasets_by_name(self, feast_rest_client):
     def test_get_permission_by_name(self, feast_rest_client):
         """Test getting a specific permission by name."""
         response = feast_rest_client.get(
-            f"/permissions/feast_admin_permission?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
+            f"/permissions/feast_admin_permission?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
         )
         APITestHelpers.validate_response_success(response)
 
     def test_list_permissions(self, feast_rest_client):
         """Test listing permissions for a specific project."""
         response = feast_rest_client.get(
-            f"/permissions?project={TestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
+            f"/permissions?project={RegistryTestConfig.CREDIT_SCORING_PROJECT}&include_relationships=false"
        )
 
         data = APITestHelpers.validate_response_success(response)
@@ -595,9 +605,9 @@ def test_list_permissions(self, feast_rest_client):
 
         # Extract and validate names
         actual_names = [ds["spec"]["name"] for ds in data["permissions"]]
-        assert len(actual_names) == len(TestConfig.PERMISSION_NAMES)
+        assert len(actual_names) == len(RegistryTestConfig.PERMISSION_NAMES)
 
-        for name in TestConfig.PERMISSION_NAMES:
+        for name in RegistryTestConfig.PERMISSION_NAMES:
             assert name in actual_names
 
-        APITestHelpers.validate_pagination(data, TestConfig.PERMISSIONS_COUNT)
+        APITestHelpers.validate_pagination(data, RegistryTestConfig.PERMISSIONS_COUNT)