diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000000..a897ef815b --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,127 @@ +# UMF (Unified Memory Framework) - AI Coding Guide + +## Project Architecture + +UMF is a C library for constructing memory allocators and pools, built around a two-layer architecture: + +- **Memory Providers** (`src/provider/`): Handle coarse-grained OS-level memory allocation (mmap, CUDA, Level Zero, etc.) +- **Memory Pools** (`src/pool/`): Handle fine-grained allocation using providers as backing store (jemalloc, scalable, disjoint) + +Key architectural patterns: +- Provider/pool separation enables mixing any provider with any pool allocator +- Operations structures (`*_ops_t`) define plugin interfaces for extensibility +- Handle-based API (`*_handle_t`) abstracts implementation details +- Result codes (`umf_result_t`) for consistent error handling + +## Development Workflows + +### Build System +```bash +# Standard build +cmake -B build -DCMAKE_BUILD_TYPE=Release +cmake --build build -j $(nproc) + +# Enable all features for development +# GPU tests will work only in an environment with proper hardware and drivers +cmake -B build -DCMAKE_BUILD_TYPE=Debug \ + -DUMF_BUILD_TESTS=ON -DUMF_BUILD_GPU_TESTS=OFF \ + -DUMF_BUILD_EXAMPLES=ON -DUMF_DEVELOPER_MODE=ON \ + -DUMF_FORMAT_CODE_STYLE=ON +``` + +### Version Management +- Version determined by: + 1. `git describe` (preferred) + 2. `VERSION` file fallback + 3. "0.0.0" default +- `set_version_variables()` in `cmake/helpers.cmake` handles version detection +- For releases: create `VERSION` file with semver format (e.g., "1.0.3") + +### Code Formatting +- **Always format code before committing**: `make format-apply` +- Requires build with `-DUMF_FORMAT_CODE_STYLE=ON` +- Uses clang-format-15.0, cmake-format-0.6, and black for Python + +### Testing Patterns +- Use `build_umf_test()` CMake function in `test/CMakeLists.txt` +- GPU tests require `UMF_BUILD_GPU_TESTS=ON` and hardware/drivers +- IPC tests use producer/consumer pattern with shell scripts +- Platform-specific tests: `.c` files for portability, `.cpp` for C++ features, utils, and selected tests + +### CI/CD Structure +- `pr_push.yml`: Main workflow calling reusable workflows. It's called for each PR change or push to main/stable branches +- Separate workflows for different configurations: `reusable_gpu.yml`, `reusable_sanitizers.yml`, etc. 
+- Provider-specific testing: Level Zero, CUDA runners with actual hardware + +## Coding Conventions + +### Naming Patterns +- Public API: `umf*` prefix (e.g., `umfMemoryProviderCreate`) +- Internal functions: `snake_case` without prefix +- Structures: `*_t` suffix for types, `*_handle_t` for opaque handles +- Constants: `UMF_*` uppercase with underscores + +### Memory Management Patterns +- Always pair create/destroy functions (e.g., `umfMemoryProviderCreate`/`umfMemoryProviderDestroy`) +- Use `umf_result_t` return codes, never throw exceptions +- Provider params have separate create/destroy lifecycle +- Thread-local storage (`__TLS`) for error state in providers + +### Provider Implementation Pattern +```c +// Standard provider structure +typedef struct my_provider_t { + // Provider-specific state +} my_provider_t; + +static umf_result_t my_initialize(const void *params, void **provider); +static umf_result_t my_finalize(void *provider); +static umf_result_t my_alloc(void *provider, size_t size, size_t alignment, void **ptr); +static umf_result_t my_free(void *provider, void *ptr, size_t size); + +static const umf_memory_provider_ops_t MY_PROVIDER_OPS = { + .version = UMF_PROVIDER_OPS_VERSION_CURRENT, + .initialize = my_initialize, + .finalize = my_finalize, + .alloc = my_alloc, + .free = my_free, + // ... other required ops +}; +``` + +## Key Files and Patterns + +### Core APIs +- `include/umf.h`: Main header, include this for basic usage +- `include/umf/memory_provider_ops.h`: Provider plugin interface +- `include/umf/memory_pool_ops.h`: Pool plugin interface + +### Common Utilities +- `src/utils/`: Logging (`utils_log.h`), concurrency (`utils_concurrency.h`), assertions +- `src/critnib/`: Concurrent radix tree for address tracking +- `src/base_alloc/`: Base allocation utilities + +### Platform Abstractions +- `libumf_linux.c`/`libumf_windows.c`: OS-specific implementations +- `topology.c`: HWLOC integration for NUMA topology discovery +- Provider files handle platform-specific allocation (CUDA, Level Zero, OS memory) + +## Integration Points + +### NUMA Support +- Uses HWLOC for topology discovery (`topology.c`, `umf_hwloc.h`) +- NUMA policies in `mempolicy.c`: bind, interleave, split modes +- Memory spaces (`memspace.c`) and targets (`memtarget.c`) for NUMA abstraction + +### GPU Integration +- Level Zero provider: `provider_level_zero.c` for Intel GPUs +- CUDA provider: `provider_cuda.c` for NVIDIA GPUs +- Examples in `examples/level_zero_shared_memory/` and `examples/cuda_shared_memory/` + +### IPC (Inter-Process Communication) +- Linux-specific implementation using file descriptor passing +- Requires `PTRACE_MODE_ATTACH_REALCREDS` permission +- Uses `memfd_create()` or `memfd_secret()` for anonymous shared memory + +When implementing new providers or pools, follow the existing patterns in +`src/provider/provider_os_memory.c` and `src/pool/pool_scalable.c` as reference implementations. 
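The guide above pairs any provider with any pool through the handle-based `umf*` API and stresses matched create/destroy calls. As a complement, here is a minimal, hedged sketch of that pairing using the OS memory provider and the scalable pool. The `umfOsMemoryProviderParams*` helpers and the exact parameter types are assumptions based on the naming conventions described above; check the headers under `include/umf/` for the current signatures before relying on them.

```c
// Minimal sketch (not the repository's example): OS memory provider backing a
// scalable pool. Params helpers and exact signatures are assumptions.
#include <umf/memory_pool.h>
#include <umf/memory_provider.h>
#include <umf/pools/pool_scalable.h>
#include <umf/providers/provider_os_memory.h>

#include <stdio.h>

int main(void) {
    // Coarse-grained backing memory: the OS memory provider.
    umf_os_memory_provider_params_handle_t params = NULL;
    if (umfOsMemoryProviderParamsCreate(&params) != UMF_RESULT_SUCCESS) {
        return 1;
    }

    umf_memory_provider_handle_t provider = NULL;
    umf_result_t res =
        umfMemoryProviderCreate(umfOsMemoryProviderOps(), params, &provider);
    umfOsMemoryProviderParamsDestroy(params); // params have their own lifecycle
    if (res != UMF_RESULT_SUCCESS) {
        return 1;
    }

    // Fine-grained allocations: the scalable pool on top of the provider.
    umf_memory_pool_handle_t pool = NULL;
    if (umfPoolCreate(umfScalablePoolOps(), provider, NULL, 0, &pool) !=
        UMF_RESULT_SUCCESS) {
        umfMemoryProviderDestroy(provider);
        return 1;
    }

    void *ptr = umfPoolMalloc(pool, 1024);
    printf("allocated %p\n", ptr);
    umfPoolFree(pool, ptr);

    // Destroy in reverse order of creation, mirroring the create/destroy pairing.
    umfPoolDestroy(pool);
    umfMemoryProviderDestroy(provider);
    return 0;
}
```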
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c24160fbb4..26262ec8a7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,7 @@ updates: - package-ecosystem: "pip" directory: "/third_party" # Location of package manifests schedule: - interval: "daily" + interval: "monthly" ignore: - dependency-name: "clang-format" - dependency-name: "cmake-format" @@ -17,7 +17,7 @@ updates: - package-ecosystem: "github-actions" directory: "/.github/workflows/" schedule: - interval: "daily" + interval: "monthly" groups: actions-dependencies: applies-to: version-updates diff --git a/.github/docker/alpine-3.21.Dockerfile b/.github/docker/alpine-3.21.Dockerfile new file mode 100644 index 0000000000..fef9810703 --- /dev/null +++ b/.github/docker/alpine-3.21.Dockerfile @@ -0,0 +1,46 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# +# Dockerfile - a 'recipe' for Docker to build an image of Alpine +# environment for building the Unified Memory Framework project. +# + +# Pull base Alpine image version 3.21 +FROM registry.hub.docker.com/library/alpine@sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c + +# Set environment variables +ENV OS=alpine +ENV OS_VER=3.21 + +# Base development packages +ARG BASE_DEPS="\ + bash \ + cmake \ + git \ + g++ \ + make \ + sudo" + +# UMF's dependencies +ARG UMF_DEPS="\ + hwloc-dev" + +# Dependencies for tests +ARG TEST_DEPS="\ + numactl-dev" + +# Update and install required packages +RUN apk update \ + && apk add --no-cache \ + ${BASE_DEPS} \ + ${TEST_DEPS} \ + ${UMF_DEPS} + +# Add a new (non-root) 'test_user' +ENV USER=test_user +RUN adduser -D -G wheel ${USER} +RUN echo '%wheel ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers + +USER test_user diff --git a/.github/scripts/alpine_build.sh b/.github/scripts/alpine_build.sh new file mode 100755 index 0000000000..4bfdb44616 --- /dev/null +++ b/.github/scripts/alpine_build.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# alpine_build.sh - Script for building UMF on Alpine image + +set -e + +UMF_BUILD_TYPE=$1 +WORKDIR=$2 + +sudo chown $USER $WORKDIR +cd unified-memory-framework + +cmake -B build -DCMAKE_BUILD_TYPE=$UMF_BUILD_TYPE -DUMF_BUILD_TESTS=ON -DUMF_BUILD_EXAMPLES=ON +cmake --build build diff --git a/.github/workflows/.spellcheck-conf.toml b/.github/workflows/.spellcheck-conf.toml index 288af6a19d..bb0f480d69 100644 --- a/.github/workflows/.spellcheck-conf.toml +++ b/.github/workflows/.spellcheck-conf.toml @@ -1,6 +1,6 @@ [default] # Don't correct the following words: -extend-ignore-words-re = ["ASSER", "Tne", "ba", "BA", "PN"] +extend-ignore-words-re = ["ASSER", "Tne", "ba", "BA", "PN", "usm"] [files] # completely exclude those files from consideration: diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index a087d11978..22394723ef 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: ref: ${{ github.ref }} fetch-depth: 0 diff --git a/.github/workflows/detect_changes.yml b/.github/workflows/detect_changes.yml index 4c1c9d4bfe..3458c4e4d3 100644 --- a/.github/workflows/detect_changes.yml +++ b/.github/workflows/detect_changes.yml @@ -21,13 +21,13 @@ jobs: changed_files: ${{ steps.changed-files.outputs.all_changed_files }} steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c #v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 #v47.0.0 - name: List all changed files env: diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index aaaae6ddce..3f13e8d133 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -79,7 +79,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -126,12 +126,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Restore vcpkg cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -243,7 +243,7 @@ jobs: - name: Save vcpkg cache if: steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} @@ -268,12 +268,12 @@ jobs: steps: - name: Checkout - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Restore vcpkg cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -354,7 +354,7 @@ jobs: - name: Save vcpkg cache if: steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} @@ -380,7 +380,7 @@ jobs: run: sudo apt-get install -y libnuma-dev - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -422,7 +422,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -466,7 +466,6 @@ jobs: with: provider: "LEVEL_ZERO" runner: "L0-BMG" - os: "['Ubuntu']" CUDA: uses: ./.github/workflows/reusable_gpu.yml with: @@ -495,7 +494,13 @@ jobs: # Run benchmarks with the latest SYCL (with the latest UMF copied into the SYCL) # to verify the compatibility. + # + # TODO: re-enable this job, when nightly sycl builds are again available; + # the last one available (as of 24.07.2025) is not working properly with + # compute benchmarks. Now, we could only build sycl from sources, or find a + # matching version of compute benchmarks with last nightly package. Benchmarks-sycl: + if: false uses: ./.github/workflows/reusable_benchmarks.yml permissions: contents: write @@ -513,3 +518,28 @@ jobs: SYCL: uses: ./.github/workflows/reusable_sycl.yml + + alpine: + name: Alpine + env: + HOST_WORKDIR: ${{github.workspace}} + WORKDIR: /unified-memory-framework + strategy: + matrix: + build_type: [Debug, Release] + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + fetch-depth: 0 + + - name: Build Alpine image + run: | + docker build . 
-f .github/docker/alpine-3.21.Dockerfile -t umf-alpine-3.21 + + - name: Run UMF build on Alpine image + run: | + docker run --rm -i -v $HOST_WORKDIR:$WORKDIR \ + umf-alpine-3.21 $WORKDIR/.github/scripts/alpine_build.sh ${{matrix.build_type}} $WORKDIR diff --git a/.github/workflows/pr_push.yml b/.github/workflows/pr_push.yml index 52bd73756a..fffdc7373e 100644 --- a/.github/workflows/pr_push.yml +++ b/.github/workflows/pr_push.yml @@ -47,7 +47,6 @@ jobs: provider: "LEVEL_ZERO" runner: "L0-BMG" shared_lib: "['ON']" - os: "['Ubuntu']" CUDA: needs: [Build] uses: ./.github/workflows/reusable_gpu.yml @@ -99,6 +98,6 @@ jobs: uses: ./.github/workflows/reusable_compatibility.yml strategy: matrix: - tag: ["v1.0.0"] + tag: ["v1.0.1"] with: tag: ${{matrix.tag}} diff --git a/.github/workflows/reusable_basic.yml b/.github/workflows/reusable_basic.yml index 7980e29397..7299cedabd 100644 --- a/.github/workflows/reusable_basic.yml +++ b/.github/workflows/reusable_basic.yml @@ -30,7 +30,6 @@ jobs: level_zero_provider: ['ON'] cuda_provider: ['ON'] install_tbb: ['ON'] - disable_hwloc: ['OFF'] link_hwloc_statically: ['OFF'] cmake_ver: ['default'] include: @@ -41,7 +40,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' # check minimum supported cmake version cmake_ver: '3.14.0' @@ -52,7 +50,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' cmake_ver: '3.28.0' - os: ubuntu-24.04 @@ -62,7 +59,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' cmake_ver: 'default' # test level_zero_provider='OFF' and cuda_provider='OFF' @@ -73,7 +69,6 @@ jobs: level_zero_provider: 'OFF' cuda_provider: 'OFF' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' cmake_ver: 'default' # test icx compiler @@ -84,7 +79,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' cmake_ver: 'default' # test lld linker @@ -95,7 +89,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'OFF' llvm_linker: '-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld" -DCMAKE_MODULE_LINKER_FLAGS="-fuse-ld=lld" -DCMAKE_SHARED_LINKER_FLAGS="-fuse-ld=lld"' cmake_ver: 'default' @@ -107,17 +100,6 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'OFF' - disable_hwloc: 'OFF' - link_hwloc_statically: 'OFF' - cmake_ver: 'default' - - os: ubuntu-22.04 - build_type: Debug - compiler: {c: gcc, cxx: g++} - shared_library: 'ON' - level_zero_provider: 'ON' - cuda_provider: 'ON' - install_tbb: 'ON' - disable_hwloc: 'ON' link_hwloc_statically: 'OFF' cmake_ver: 'default' - os: ubuntu-22.04 @@ -127,14 +109,13 @@ jobs: level_zero_provider: 'ON' cuda_provider: 'ON' install_tbb: 'ON' - disable_hwloc: 'OFF' link_hwloc_statically: 'ON' cmake_ver: 'default' - name: Basic (${{matrix.os}}, build_type=${{matrix.build_type}}, compilers=${{matrix.compiler.c}}/${{matrix.compiler.cxx}}, shared_library=${{matrix.shared_library}}, level_zero_provider=${{matrix.level_zero_provider}}, cuda_provider=${{matrix.cuda_provider}}, install_tbb=${{matrix.install_tbb}}, disable_hwloc=${{matrix.disable_hwloc}}, link_hwloc_statically=${{matrix.link_hwloc_statically}}, cmake_ver=${{matrix.cmake_ver}}) + name: Basic (${{matrix.os}}, build_type=${{matrix.build_type}}, 
compilers=${{matrix.compiler.c}}/${{matrix.compiler.cxx}}, shared_library=${{matrix.shared_library}}, level_zero_provider=${{matrix.level_zero_provider}}, cuda_provider=${{matrix.cuda_provider}}, install_tbb=${{matrix.install_tbb}}, link_hwloc_statically=${{matrix.link_hwloc_statically}}, cmake_ver=${{matrix.cmake_ver}}) steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -146,12 +127,6 @@ jobs: chmod +x cmake-${{matrix.cmake_ver}}-Linux-x86_64.sh echo ${USERPASS} | sudo -Sk ./cmake-${{matrix.cmake_ver}}-Linux-x86_64.sh --skip-license --prefix=/usr/local - - name: Uninstall hwloc - if: matrix.disable_hwloc == 'ON' - run: | - echo ${USERPASS} | sudo -Sk apt-get remove --purge -y '*hwloc*' - echo ${USERPASS} | sudo -Sk apt-get autoremove -y - - name: Uninstall TBB apt package if: matrix.install_tbb == 'OFF' run: | @@ -185,7 +160,6 @@ jobs: -DUMF_DEVELOPER_MODE=ON -DUMF_BUILD_LIBUMF_POOL_JEMALLOC=ON -DUMF_TESTS_FAIL_ON_SKIP=ON - -DUMF_DISABLE_HWLOC=${{matrix.disable_hwloc}} -DUMF_LINK_HWLOC_STATICALLY=${{matrix.link_hwloc_statically}} ${{ matrix.build_type == 'Debug' && matrix.compiler.c == 'gcc' && '-DUMF_USE_COVERAGE=ON' || '' }} ${{ matrix.llvm_linker || '' }} @@ -205,16 +179,16 @@ jobs: if: ${{ matrix.build_type == 'Debug' && matrix.compiler.c == 'gcc' }} working-directory: ${{env.BUILD_DIR}} run: | - export COVERAGE_FILE_NAME=${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}}-no_hwloc-${{matrix.disable_hwloc}} + export COVERAGE_FILE_NAME=${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}} echo "COVERAGE_FILE_NAME: $COVERAGE_FILE_NAME" ../scripts/coverage/coverage_capture.sh $COVERAGE_FILE_NAME mkdir -p ${{env.COVERAGE_DIR}} mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.build_type == 'Debug' && matrix.compiler.c == 'gcc' }} with: - name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}}-no_hwloc-${{matrix.disable_hwloc}} + name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}} path: ${{env.COVERAGE_DIR}} - name: Remove the installation directory @@ -226,7 +200,7 @@ jobs: --build-dir ${{env.BUILD_DIR}} --install-dir ${{env.INSTL_DIR}} --build-type ${{matrix.build_type}} - ${{ matrix.install_tbb == 'ON' && matrix.disable_hwloc != 'ON' && matrix.shared_library == 'ON' && '--proxy' || '' }} + ${{ matrix.install_tbb == 'ON' && matrix.shared_library == 'ON' && '--proxy' || '' }} --umf-version ${{env.UMF_VERSION}} ${{ matrix.shared_library == 'ON' && '--shared-library' || '' }} @@ -281,7 +255,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -307,12 +281,12 @@ jobs: # Ensure that the required environment is set # Note: While this step is required for the clang-cl compiler, it can be executed for all jobs - name: Setup MSVC dev command prompt - uses: TheMrMilchmann/setup-msvc-dev@fb19abb8a41b3cf0340f5d1be17d420309232be6 # v3.0.1 + uses: TheMrMilchmann/setup-msvc-dev@79dac248aac9d0059f86eae9d8b5bfab4e95e97c # v4.0.0 with: arch: x64 - name: Restore vcpkg cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + 
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -462,7 +436,7 @@ jobs: - name: Save vcpkg cache if: steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} @@ -477,7 +451,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -519,7 +493,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -549,9 +523,9 @@ jobs: strategy: matrix: include: - - os: macos-13 - static_hwloc: 'OFF' - os: macos-14 + static_hwloc: 'OFF' + - os: macos-15 static_hwloc: 'ON' env: BUILD_TYPE : "Release" @@ -560,7 +534,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 diff --git a/.github/workflows/reusable_benchmarks.yml b/.github/workflows/reusable_benchmarks.yml index 45434c7d97..a6dc2f1e2f 100644 --- a/.github/workflows/reusable_benchmarks.yml +++ b/.github/workflows/reusable_benchmarks.yml @@ -59,7 +59,7 @@ jobs: echo "bench_params=$params" >> $GITHUB_ENV - name: Add comment to PR - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 if: ${{ always() && inputs.pr_no != 0 }} with: script: | @@ -76,7 +76,7 @@ jobs: }) - name: Checkout UMF - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: ${{env.UMF_DIR}} fetch-depth: 0 @@ -113,7 +113,7 @@ jobs: run: cmake --build ${{env.BUILD_DIR}} -j $(nproc) - name: Checkout UMF results branch - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: ref: benchmark-results path: results-repo @@ -121,14 +121,14 @@ jobs: # Get scripts for benchmark data visualization (from SYCL repo). # Use specific ref, as the scripts or files' location may change. - name: Checkout benchmark scripts - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: intel/llvm # Note: The same ref is used in docs build (for dashboard generation)! 
# - # 11.07.2025 + # 30.07.2025 # branch: sycl - ref: b68f49e0a03fb63de5a3e207f0f65247964d337b + ref: 8f54710553800eec05a6fd9717b14f995a22b137 path: sc sparse-checkout: | devops/scripts/benchmarks @@ -214,7 +214,7 @@ jobs: run: cat ${{ github.workspace }}/benchmark_results.md || true - name: Add comment to PR - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 if: ${{ always() && inputs.pr_no != 0 }} with: script: | diff --git a/.github/workflows/reusable_checks.yml b/.github/workflows/reusable_checks.yml index e27807de94..460dee11f7 100644 --- a/.github/workflows/reusable_checks.yml +++ b/.github/workflows/reusable_checks.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -57,7 +57,7 @@ jobs: ./scripts/check_license/check_headers.sh . "Apache-2.0 WITH LLVM-exception" -v - name: Run a spell check - uses: crate-ci/typos@392b78fe18a52790c53f42456e46124f77346842 # v1.34.0 + uses: crate-ci/typos@2d0ce569feab1f8752f1dde43cc2f2aa53236e06 # v1.40.0 with: config: ./.github/workflows/.spellcheck-conf.toml diff --git a/.github/workflows/reusable_codeql.yml b/.github/workflows/reusable_codeql.yml index af3ec72ab7..f2148d1cdf 100644 --- a/.github/workflows/reusable_codeql.yml +++ b/.github/workflows/reusable_codeql.yml @@ -31,23 +31,24 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Setup newer Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.10" - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: languages: cpp + trap-caching: false - name: "[Win] Restore vcpkg cache" if: matrix.os == 'windows-latest' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -107,7 +108,7 @@ jobs: run: cmake --build ${{env.BUILD_DIR}} --config Release -j - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 - name: "[Win] Prepare vcpkg cache" if: matrix.os == 'windows-latest' && steps.cache.outputs.cache-hit != 'true' @@ -116,7 +117,7 @@ jobs: - name: "[Win] Save vcpkg cache" if: matrix.os == 'windows-latest' && steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} diff --git a/.github/workflows/reusable_compatibility.yml b/.github/workflows/reusable_compatibility.yml index 98eb223bb7..621103479c 100644 --- a/.github/workflows/reusable_compatibility.yml +++ b/.github/workflows/reusable_compatibility.yml @@ -9,7 +9,10 @@ on: tag: description: Check 
backward compatibility with this tag type: string - default: "v1.0.0" + # While we're still compatible with v1.0.0, we implemented a fix in v1.0.1 + # to verify if the split operation is supported (in jemalloc pool). + # Without bumping the tag we'd have to omit some tests. + default: "v1.0.1" permissions: contents: read @@ -26,14 +29,14 @@ jobs: sudo apt-get install -y clang cmake hwloc libhwloc-dev libnuma-dev libtbb-dev - name: Checkout "tag" UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 ref: refs/tags/${{inputs.tag}} path: ${{github.workspace}}/tag_version - name: Checkout latest UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 path: ${{github.workspace}}/latest_version @@ -95,21 +98,20 @@ jobs: - name: Run "tag" UMF tests with latest UMF libs (warnings enabled) working-directory: ${{github.workspace}}/tag_version/build - # Exclude the test_jemalloc_pool test - - # TODO: add fix for that in v1.0.1 - run: > - UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" - LD_LIBRARY_PATH=${{github.workspace}}/latest_version/build/lib/ - ctest --verbose -E test_jemalloc_pool + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" - - name: Run EXCLUDED tests with filters + - name: Run disabled tests individually with latest UMF libs (warnings enabled) working-directory: ${{github.workspace}}/tag_version/build - # Exclude the jemallocPoolName test case of the test_jemalloc_pool test - # TODO: add fix for that in v1.0.1 - run: > - UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" - LD_LIBRARY_PATH=${{github.workspace}}/latest_version/build/lib/ - ./test/test_jemalloc_pool --gtest_filter="-*jemallocPoolName*" + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | + test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. 
@@ -141,14 +143,14 @@ jobs: steps: - name: Checkout "tag" UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 ref: refs/tags/${{inputs.tag}} path: ${{github.workspace}}/tag_version - name: Restore vcpkg cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -174,7 +176,7 @@ jobs: run: vcpkg install --triplet x64-windows - name: Checkout latest UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 path: ${{github.workspace}}/latest_version @@ -231,22 +233,15 @@ jobs: - name: Run "tag" UMF tests with latest UMF libs (warnings enabled) working-directory: ${{github.workspace}}/tag_version/build - # Exclude the test_jemalloc_pool test - - # TODO: add fix for that in v1.0.1 + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no run: | $env:UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" cp ${{github.workspace}}/latest_version/build/bin/Debug/umf.dll ${{github.workspace}}/tag_version/build/bin/Debug/umf.dll - ctest -C Debug --verbose -E test_jemalloc_pool - - - name: Run EXCLUDED tests with filters - working-directory: ${{github.workspace}}/tag_version/build/ - # Exclude the jemallocPoolName test case of the test_jemalloc_pool test - # TODO: add fix for that in v1.0.1 - run: | - $env:UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" + ctest -C Debug --verbose -E "test_memoryProvider|test_disjoint_pool" $env:Path = "${{github.workspace}}/tag_version/build/bin/Debug;${{env.VCPKG_BIN_PATH}};$env:Path" - cp ${{github.workspace}}/latest_version/build/bin/Debug/umf.dll ${{github.workspace}}/tag_version/build/bin/Debug/umf.dll - test/Debug/test_jemalloc_pool.exe --gtest_filter="-*jemallocPoolName*" + test/Debug/test_memoryProvider.exe --gtest_filter="-*Trace" + test/Debug/test_disjoint_pool.exe --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. 
@@ -297,7 +292,7 @@ jobs: - name: Save vcpkg cache if: steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} @@ -313,7 +308,7 @@ jobs: steps: - name: Checkout latest UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 path: ${{github.workspace}}/latest_version @@ -345,7 +340,7 @@ jobs: run: cmake --install ${{github.workspace}}/latest_version/build --config Debug - name: Checkout "tag" UMF version - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 ref: refs/tags/${{inputs.tag}} @@ -384,21 +379,20 @@ jobs: - name: Run "tag" UMF tests with latest UMF libs (warnings enabled) working-directory: ${{github.workspace}}/tag_version/build - # Exclude the test_jemalloc_pool test - - # TODO: add fix for that in v1.0.1 - run: > - UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" - LD_LIBRARY_PATH=${{github.workspace}}/latest_version/build/lib/ - ctest --verbose -E test_jemalloc_pool + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | + ctest --verbose -E "test_memoryProvider|test_disjoint_pool" - - name: Run EXCLUDED tests with filters + - name: Run disabled tests individually with latest UMF libs (warnings enabled) working-directory: ${{github.workspace}}/tag_version/build - # Exclude the jemallocPoolName test case of the test_jemalloc_pool test - # TODO: add fix for that in v1.0.1 - run: > - UMF_LOG="level:warning;flush:debug;output:stderr;pid:no" - LD_LIBRARY_PATH=${{github.workspace}}/latest_version/build/lib/ - ./test/test_jemalloc_pool --gtest_filter="-*jemallocPoolName*" + env: + UMF_LOG: level:warning;flush:debug;output:stderr;pid:no + LD_LIBRARY_PATH: ${{github.workspace}}/latest_version/build/lib/ + run: | + test/test_memoryProvider --gtest_filter="-*Trace" + test/test_disjoint_pool --gtest_filter="-test.internals" # Browse all folders in the examples directory, build them using the # latest UMF version, and run them, excluding those in the exclude list. 
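The compatibility jobs above now exclude `test_memoryProvider` and `test_disjoint_pool` from the ctest run and re-run those two binaries individually with gtest filters, with the freshly built libraries injected via `LD_LIBRARY_PATH`. A rough local reproduction of that flow, assuming the same `tag_version`/`latest_version` checkout layout as the workflow, could look like this:

```bash
# Sketch of the backward-compatibility run: tests built from the tagged
# checkout, executed against the latest-version libraries.
export UMF_LOG="level:warning;flush:debug;output:stderr;pid:no"
export LD_LIBRARY_PATH="$PWD/latest_version/build/lib/"

cd tag_version/build

# Everything except the two tests that need per-case filtering.
ctest --verbose -E "test_memoryProvider|test_disjoint_pool"

# Run the excluded binaries individually, skipping the known-incompatible cases.
test/test_memoryProvider --gtest_filter="-*Trace"
test/test_disjoint_pool --gtest_filter="-test.internals"
```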
diff --git a/.github/workflows/reusable_coverage.yml b/.github/workflows/reusable_coverage.yml index b632b718d8..a035eeab1a 100644 --- a/.github/workflows/reusable_coverage.yml +++ b/.github/workflows/reusable_coverage.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -32,7 +32,7 @@ jobs: sudo apt-get install -y lcov - name: Download all coverage artifacts - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: pattern: exports-coverage-* path: coverage @@ -51,7 +51,7 @@ jobs: echo "COV_OUT=$(tail -n1 output.txt | grep -oP "lines[.]+: [\d.]+%" | cut -d ' ' -f2 | tr -d '%')" >> $GITHUB_OUTPUT - name: Upload coverage report - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: coverage_html_report path: coverage/coverage_report diff --git a/.github/workflows/reusable_dax.yml b/.github/workflows/reusable_dax.yml index b86b4138c7..7dacba4e9d 100644 --- a/.github/workflows/reusable_dax.yml +++ b/.github/workflows/reusable_dax.yml @@ -65,7 +65,7 @@ jobs: rm -f ${{env.UMF_TESTS_FSDAX_PATH}} ${{env.UMF_TESTS_FSDAX_PATH_2}} - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -142,7 +142,7 @@ jobs: mkdir -p ${{env.COVERAGE_DIR}} mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.build_type == 'Debug' }} with: name: ${{env.COVERAGE_NAME}}-shared-${{matrix.shared_library}} diff --git a/.github/workflows/reusable_dockers_build.yml b/.github/workflows/reusable_dockers_build.yml index a0a84ab0e4..c9c62a3303 100644 --- a/.github/workflows/reusable_dockers_build.yml +++ b/.github/workflows/reusable_dockers_build.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -34,7 +34,7 @@ jobs: # Login and push require login/pass to GHCR - omit these steps on forks - name: Login to GitHub Container Registry if: ${{ github.event_name != 'pull_request' && github.repository == 'oneapi-src/unified-memory-framework' }} - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: registry: ghcr.io username: bb-ur diff --git a/.github/workflows/reusable_docs_build.yml b/.github/workflows/reusable_docs_build.yml index 2874d1e95d..b852e09182 100644 --- a/.github/workflows/reusable_docs_build.yml +++ b/.github/workflows/reusable_docs_build.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -41,8 +41,7 @@ jobs: -DUMF_BUILD_LEVEL_ZERO_PROVIDER=OFF \ -DUMF_BUILD_CUDA_PROVIDER=OFF \ -DUMF_BUILD_TESTS=OFF \ - -DUMF_BUILD_EXAMPLES=OFF \ - 
-DUMF_DISABLE_HWLOC=ON + -DUMF_BUILD_EXAMPLES=OFF cmake --build build --target docs # @@ -51,12 +50,12 @@ jobs: # - name: Checkout benchmark scripts if: ${{ inputs.upload == true }} - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: intel/llvm - # 11.07.2025 + # 30.07.2025 # branch: sycl - ref: b68f49e0a03fb63de5a3e207f0f65247964d337b + ref: 8f54710553800eec05a6fd9717b14f995a22b137 path: sc sparse-checkout: | devops/scripts/benchmarks @@ -79,6 +78,6 @@ jobs: - name: Upload artifact if: ${{ inputs.upload == true }} - uses: actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa # v3.0.1 + uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0 with: path: build/docs_build/generated/html diff --git a/.github/workflows/reusable_fast.yml b/.github/workflows/reusable_fast.yml index 3a33604680..3bba141312 100644 --- a/.github/workflows/reusable_fast.yml +++ b/.github/workflows/reusable_fast.yml @@ -43,7 +43,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -104,12 +104,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Restore vcpkg cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -185,7 +185,7 @@ jobs: - name: Save vcpkg cache if: steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ steps.cache.outputs.cache-primary-key }} diff --git a/.github/workflows/reusable_gpu.yml b/.github/workflows/reusable_gpu.yml index f0d1bcda80..38f2cf53ec 100644 --- a/.github/workflows/reusable_gpu.yml +++ b/.github/workflows/reusable_gpu.yml @@ -65,7 +65,7 @@ jobs: echo "PROCS=$(nproc)" >> $GITHUB_ENV - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -75,7 +75,7 @@ jobs: - name: "[Win] Restore vcpkg cache" if: matrix.os == 'Windows' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache with: path: vcpkg_pkgs_cache.zip @@ -184,7 +184,7 @@ jobs: mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}} - name: "[Lin] Upload coverage" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.os == 'Ubuntu' }} with: name: ${{env.COVERAGE_NAME}}-shared-${{matrix.shared_library}} @@ -197,7 +197,7 @@ jobs: - name: "[Win] Save vcpkg cache" if: matrix.os == 'Windows' && steps.cache.outputs.cache-hit != 'true' - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 with: path: ${{github.workspace}}/vcpkg_pkgs_cache.zip key: ${{ 
steps.cache.outputs.cache-primary-key }} diff --git a/.github/workflows/reusable_multi_numa.yml b/.github/workflows/reusable_multi_numa.yml index 63075c6f95..86ed771751 100644 --- a/.github/workflows/reusable_multi_numa.yml +++ b/.github/workflows/reusable_multi_numa.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -81,7 +81,7 @@ jobs: mkdir -p ${{env.COVERAGE_DIR}} mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.build_type == 'Debug' && matrix.os == 'ubuntu-22.04' }} with: name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}} diff --git a/.github/workflows/reusable_proxy_lib.yml b/.github/workflows/reusable_proxy_lib.yml index 5aed20984b..c09f18ef02 100644 --- a/.github/workflows/reusable_proxy_lib.yml +++ b/.github/workflows/reusable_proxy_lib.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -87,7 +87,7 @@ jobs: mkdir -p ${{env.COVERAGE_DIR}} mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.build_type == 'Debug' }} with: name: ${{env.COVERAGE_NAME}}-proxy_lib_pool-${{matrix.proxy_lib_pool}} diff --git a/.github/workflows/reusable_qemu.yml b/.github/workflows/reusable_qemu.yml index df4125d1a8..9ba9c3994f 100644 --- a/.github/workflows/reusable_qemu.yml +++ b/.github/workflows/reusable_qemu.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout UMF - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 path: umf @@ -149,7 +149,7 @@ jobs: done ls -al ./coverage - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: exports-coverage-qemu-${{matrix.os}} path: coverage diff --git a/.github/workflows/reusable_sanitizers.yml b/.github/workflows/reusable_sanitizers.yml index 0af7828abb..a08ce2ccac 100644 --- a/.github/workflows/reusable_sanitizers.yml +++ b/.github/workflows/reusable_sanitizers.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 @@ -91,7 +91,7 @@ jobs: # # steps: # - name: Checkout - # uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 # with: # fetch-depth: 0 # diff --git a/.github/workflows/reusable_sycl.yml b/.github/workflows/reusable_sycl.yml index 90bdb50c83..3f6d686ca9 100644 --- a/.github/workflows/reusable_sycl.yml +++ b/.github/workflows/reusable_sycl.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: - llvm_tag: ["latest", "nightly-2025-07-09"] # "latest" or llvm with UMF v1.0.0-rc1 + llvm_tag: ["latest", "nightly-2025-07-31"] # "latest" or llvm 
with UMF v1.0.0 steps: # 1. Install sycl @@ -51,7 +51,7 @@ jobs: # 2. Install UMF - name: Checkout UMF - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: umf_repo fetch-depth: 0 @@ -90,7 +90,7 @@ jobs: # Arbitrarily picked tests to check the compatibility. Note that some intel/llvm tests may be flaky # Checkout the repo in the version that matches the downloaded version - name: Checkout sycl - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: intel/llvm path: sycl_repo diff --git a/.github/workflows/reusable_trivy.yml b/.github/workflows/reusable_trivy.yml index d7fe24fe65..9d39cdaa4f 100644 --- a/.github/workflows/reusable_trivy.yml +++ b/.github/workflows/reusable_trivy.yml @@ -16,12 +16,12 @@ jobs: steps: - name: Clone the git repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Run Trivy - uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # v0.32.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 with: scan-type: 'config' hide-progress: false @@ -38,6 +38,6 @@ jobs: cat trivy-results.sarif - name: Upload results - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/reusable_valgrind.yml b/.github/workflows/reusable_valgrind.yml index 65bb767ed1..cc347e32bf 100644 --- a/.github/workflows/reusable_valgrind.yml +++ b/.github/workflows/reusable_valgrind.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 8187a6863d..168f748056 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -28,12 +28,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Run analysis - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: scorecard_results.sarif results_format: sarif @@ -41,7 +41,7 @@ jobs: # Upload the results as artifacts to the repository Actions tab. - name: Upload artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 with: name: Scorecard results path: scorecard_results.sarif @@ -49,6 +49,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: sarif_file: scorecard_results.sarif diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 7cb52b0732..ea1fecaa50 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 diff --git a/CMakeLists.txt b/CMakeLists.txt index cfec20fa0d..2a60a22502 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,7 +32,8 @@ if(UMF_CMAKE_VERSION VERSION_EQUAL "0.0.0") message( WARNING "UMF version is set to 0.0.0, which most likely is not expected! " - "Please checkout the git tags to get a proper version.") + "Please install git and checkout the git tags to get a proper version." + ) endif() if(PROJECT_VERSION_PATCH GREATER 0) @@ -75,10 +76,6 @@ umf_option(UMF_BUILD_EXAMPLES "Build UMF examples" ON) umf_option(UMF_BUILD_GPU_EXAMPLES "Build UMF GPU examples" OFF) umf_option(UMF_BUILD_FUZZTESTS "Build UMF fuzz tests (supported only on Linux with Clang)" OFF) -umf_option( - UMF_DISABLE_HWLOC - "Disable hwloc and UMF features requiring it (OS provider, memtargets, topology discovery)" - OFF) umf_option( UMF_LINK_HWLOC_STATICALLY "Link UMF with HWLOC library statically (proxy library will be disabled on Windows+Debug build)" @@ -119,13 +116,6 @@ set_property(CACHE UMF_PROXY_LIB_BASED_ON_POOL PROPERTY STRINGS ${KNOWN_PROXY_LIB_POOLS}) list(APPEND UMF_OPTIONS_LIST UMF_PROXY_LIB_BASED_ON_POOL) -if(UMF_DISABLE_HWLOC) - message( - WARNING - "UMF_DISABLE_HWLOC option is now deprecated and will be removed in v1.1.0 UMF release!" - ) -endif() - # --------------------------------------------------------------------------- # # Setup required variables, definitions; fetch dependencies; include # sub_directories based on build options; set flags; etc. @@ -267,7 +257,7 @@ else() ) endif() -if(NOT UMF_DISABLE_HWLOC AND (NOT UMF_LINK_HWLOC_STATICALLY)) +if(NOT UMF_LINK_HWLOC_STATICALLY) pkg_check_modules(LIBHWLOC hwloc>=2.3.0) if(NOT LIBHWLOC_FOUND) find_package(LIBHWLOC 2.3.0 COMPONENTS hwloc) @@ -282,110 +272,104 @@ if(NOT UMF_DISABLE_HWLOC AND (NOT UMF_LINK_HWLOC_STATICALLY)) endif() endif() -if(UMF_LINK_HWLOC_STATICALLY AND LINUX) - find_program(AUTORECONF_EXECUTABLE autoreconf) - if(NOT AUTORECONF_EXECUTABLE) - message(WARNING "autoreconf is not installed. 
Disabling hwloc.") - set(UMF_DISABLE_HWLOC ON) - set(UMF_LINK_HWLOC_STATICALLY OFF) +if(UMF_LINK_HWLOC_STATICALLY) + if(NOT DEFINED UMF_HWLOC_REPO) + set(UMF_HWLOC_REPO "https://github.com/open-mpi/hwloc.git") + endif() + if(NOT DEFINED UMF_HWLOC_TAG) + set(UMF_HWLOC_TAG hwloc-2.10.0) endif() -endif() -if(UMF_DISABLE_HWLOC) - message(STATUS "hwloc is disabled, hence OS provider, memtargets, " - "topology discovery, examples won't be available!") -else() - if(UMF_LINK_HWLOC_STATICALLY) - if(NOT DEFINED UMF_HWLOC_REPO) - set(UMF_HWLOC_REPO "https://github.com/open-mpi/hwloc.git") - endif() + message( + STATUS + "Will fetch hwloc from ${UMF_HWLOC_REPO} (tag: ${UMF_HWLOC_TAG}) and link it statically" + ) - if(NOT DEFINED UMF_HWLOC_TAG) - set(UMF_HWLOC_TAG hwloc-2.10.0) + if(WINDOWS) + set(HWLOC_ENABLE_TESTING OFF) + set(HWLOC_SKIP_LSTOPO ON) + set(HWLOC_SKIP_TOOLS ON) + set(HWLOC_SKIP_INCLUDES ON) + + FetchContent_Declare( + hwloc_targ + GIT_REPOSITORY ${UMF_HWLOC_REPO} + GIT_TAG ${UMF_HWLOC_TAG} + SOURCE_SUBDIR contrib/windows-cmake/) + FetchContent_MakeAvailable(hwloc_targ) + + set(HWLOC_LIB_PATH "") + if(CMAKE_GENERATOR STREQUAL "NMake Makefiles") + set(HWLOC_LIB_PATH "${hwloc_targ_BINARY_DIR}/hwloc.lib") + else() + set(HWLOC_LIB_PATH "${hwloc_targ_BINARY_DIR}/lib/hwloc.lib") endif() - message( - STATUS - "Will fetch hwloc from ${UMF_HWLOC_REPO} (tag: ${UMF_HWLOC_TAG})" - ) - if(WINDOWS) - set(HWLOC_ENABLE_TESTING OFF) - set(HWLOC_SKIP_LSTOPO ON) - set(HWLOC_SKIP_TOOLS ON) - set(HWLOC_SKIP_INCLUDES ON) - - FetchContent_Declare( - hwloc_targ - GIT_REPOSITORY ${UMF_HWLOC_REPO} - GIT_TAG ${UMF_HWLOC_TAG} - SOURCE_SUBDIR contrib/windows-cmake/) - FetchContent_MakeAvailable(hwloc_targ) - - set(HWLOC_LIB_PATH "") - if(CMAKE_GENERATOR STREQUAL "NMake Makefiles") - set(HWLOC_LIB_PATH "${hwloc_targ_BINARY_DIR}/hwloc.lib") - else() - set(HWLOC_LIB_PATH "${hwloc_targ_BINARY_DIR}/lib/hwloc.lib") - endif() - - get_filename_component(LIBHWLOC_LIBRARY_DIRS ${HWLOC_LIB_PATH} - DIRECTORY) - set(LIBHWLOC_LIBRARIES ${HWLOC_LIB_PATH}) - set(LIBHWLOC_INCLUDE_DIRS ${hwloc_targ_BINARY_DIR}/include) - set(LIBHWLOC_FOUND TRUE) - else() # not Windows - FetchContent_Declare( - hwloc_targ - GIT_REPOSITORY ${UMF_HWLOC_REPO} - GIT_TAG ${UMF_HWLOC_TAG}) - FetchContent_MakeAvailable(hwloc_targ) - - add_custom_command( - COMMAND ./autogen.sh - WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} - OUTPUT ${hwloc_targ_SOURCE_DIR}/configure) - add_custom_command( - COMMAND - ./configure --prefix=${hwloc_targ_BINARY_DIR} - --enable-static=yes --enable-shared=no --disable-libxml2 - --disable-pci --disable-levelzero --disable-opencl - --disable-cuda --disable-nvml --disable-libudev - --disable-rsmi CFLAGS=-fPIC CXXFLAGS=-fPIC - WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} - OUTPUT ${hwloc_targ_SOURCE_DIR}/Makefile - DEPENDS ${hwloc_targ_SOURCE_DIR}/configure) - add_custom_command( - COMMAND make - WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} - OUTPUT ${hwloc_targ_SOURCE_DIR}/lib/libhwloc.la - DEPENDS ${hwloc_targ_SOURCE_DIR}/Makefile) - add_custom_command( - COMMAND make install - WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} - OUTPUT ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a - DEPENDS ${hwloc_targ_SOURCE_DIR}/lib/libhwloc.la) - - add_custom_target(hwloc_prod - DEPENDS ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) - add_library(hwloc INTERFACE) - target_link_libraries( - hwloc INTERFACE ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) - add_dependencies(hwloc hwloc_prod) - - set(LIBHWLOC_LIBRARY_DIRS ${hwloc_targ_BINARY_DIR}/lib) - set(LIBHWLOC_INCLUDE_DIRS 
${hwloc_targ_BINARY_DIR}/include) - set(LIBHWLOC_LIBRARIES ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) - set(LIBHWLOC_FOUND TRUE) + get_filename_component(LIBHWLOC_LIBRARY_DIRS ${HWLOC_LIB_PATH} + DIRECTORY) + set(LIBHWLOC_LIBRARIES ${HWLOC_LIB_PATH}) + set(LIBHWLOC_INCLUDE_DIRS ${hwloc_targ_BINARY_DIR}/include) + set(LIBHWLOC_FOUND TRUE) + else() # not Windows + find_program(AUTORECONF_EXECUTABLE autoreconf) + if(NOT AUTORECONF_EXECUTABLE) + message( + FATAL_ERROR + "autoreconf is not installed, but it's needed in hwloc configure step. " + "Either install it, or set UMF_LINK_HWLOC_STATICALLY=OFF and install hwloc >= 2.3.0 in your system." + ) endif() - endif() # UMF_LINK_HWLOC_STATICALLY + FetchContent_Declare( + hwloc_targ + GIT_REPOSITORY ${UMF_HWLOC_REPO} + GIT_TAG ${UMF_HWLOC_TAG}) + FetchContent_MakeAvailable(hwloc_targ) - message(STATUS " LIBHWLOC_LIBRARIES = ${LIBHWLOC_LIBRARIES}") - message(STATUS " LIBHWLOC_INCLUDE_DIRS = ${LIBHWLOC_INCLUDE_DIRS}") - message(STATUS " LIBHWLOC_LIBRARY_DIRS = ${LIBHWLOC_LIBRARY_DIRS}") - message(STATUS " LIBHWLOC_API_VERSION = ${LIBHWLOC_API_VERSION}") - if(WINDOWS) - message(STATUS " LIBHWLOC_DLL_DIRS = ${LIBHWLOC_DLL_DIRS}") + add_custom_command( + COMMAND ./autogen.sh + WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} + OUTPUT ${hwloc_targ_SOURCE_DIR}/configure) + add_custom_command( + COMMAND + ./configure --prefix=${hwloc_targ_BINARY_DIR} + --enable-static=yes --enable-shared=no --disable-libxml2 + --disable-pci --disable-levelzero --disable-opencl + --disable-cuda --disable-nvml --disable-libudev --disable-rsmi + CFLAGS=-fPIC CXXFLAGS=-fPIC + WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} + OUTPUT ${hwloc_targ_SOURCE_DIR}/Makefile + DEPENDS ${hwloc_targ_SOURCE_DIR}/configure) + add_custom_command( + COMMAND make + WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} + OUTPUT ${hwloc_targ_SOURCE_DIR}/lib/libhwloc.la + DEPENDS ${hwloc_targ_SOURCE_DIR}/Makefile) + add_custom_command( + COMMAND make install + WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR} + OUTPUT ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a + DEPENDS ${hwloc_targ_SOURCE_DIR}/lib/libhwloc.la) + + add_custom_target(hwloc_prod + DEPENDS ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) + add_library(hwloc INTERFACE) + target_link_libraries(hwloc + INTERFACE ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) + add_dependencies(hwloc hwloc_prod) + + set(LIBHWLOC_LIBRARY_DIRS ${hwloc_targ_BINARY_DIR}/lib) + set(LIBHWLOC_INCLUDE_DIRS ${hwloc_targ_BINARY_DIR}/include) + set(LIBHWLOC_LIBRARIES ${hwloc_targ_BINARY_DIR}/lib/libhwloc.a) + set(LIBHWLOC_FOUND TRUE) endif() +endif() # UMF_LINK_HWLOC_STATICALLY + +message(STATUS " LIBHWLOC_LIBRARIES = ${LIBHWLOC_LIBRARIES}") +message(STATUS " LIBHWLOC_INCLUDE_DIRS = ${LIBHWLOC_INCLUDE_DIRS}") +message(STATUS " LIBHWLOC_LIBRARY_DIRS = ${LIBHWLOC_LIBRARY_DIRS}") +message(STATUS " LIBHWLOC_API_VERSION = ${LIBHWLOC_API_VERSION}") +if(WINDOWS) + message(STATUS " LIBHWLOC_DLL_DIRS = ${LIBHWLOC_DLL_DIRS}") endif() if(hwloc_targ_SOURCE_DIR) @@ -422,7 +406,7 @@ if(UMF_BUILD_LEVEL_ZERO_PROVIDER) else() set(LEVEL_ZERO_LOADER_REPO "https://github.com/oneapi-src/level-zero.git") - set(LEVEL_ZERO_LOADER_TAG v1.21.9) + set(LEVEL_ZERO_LOADER_TAG v1.22.4) message(STATUS "Fetching Level Zero loader (${LEVEL_ZERO_LOADER_TAG}) " "from ${LEVEL_ZERO_LOADER_REPO} ...") @@ -540,7 +524,7 @@ if(WINDOWS AND UMF_USE_DEBUG_POSTFIX) -DUMF_BUILD_TESTS=OFF -DUMF_BUILD_GPU_TESTS=OFF -DUMF_BUILD_BENCHMARKS=OFF -DUMF_BUILD_BENCHMARKS_MT=OFF -DUMF_BUILD_EXAMPLES=OFF -DUMF_BUILD_GPU_EXAMPLES=OFF - -DUMF_BUILD_FUZZTESTS=OFF 
-DUMF_DISABLE_HWLOC=${UMF_DISABLE_HWLOC} + -DUMF_BUILD_FUZZTESTS=OFF -DUMF_LINK_HWLOC_STATICALLY=${UMF_LINK_HWLOC_STATICALLY} -DUMF_HWLOC_NAME=${UMF_HWLOC_NAME} -DUMF_INSTALL_RPATH=${UMF_INSTALL_RPATH} -DUMF_DEVELOPER_MODE=OFF @@ -763,9 +747,7 @@ if(WINDOWS) endif() # set UMF_PROXY_LIB_ENABLED -if(UMF_DISABLE_HWLOC) - message(STATUS "Disabling the proxy library, because HWLOC is disabled") -elseif(NOT UMF_BUILD_SHARED_LIBRARY) +if(NOT UMF_BUILD_SHARED_LIBRARY) # TODO enable this scenario message( STATUS @@ -810,7 +792,7 @@ if(UMF_BUILD_BENCHMARKS) add_subdirectory(benchmark) endif() -if(UMF_BUILD_EXAMPLES AND NOT UMF_DISABLE_HWLOC) +if(UMF_BUILD_EXAMPLES) add_subdirectory(examples) endif() diff --git a/README.md b/README.md index dd17d762fe..cf5d24be75 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,9 @@ documentation, which includes the code of the [basic example](https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/basic/basic.c). There are also more advanced examples that allocate USM memory from the [Level Zero device](examples/level_zero_shared_memory/level_zero_shared_memory.c) using the Level Zero API and UMF Level Zero memory provider and [CUDA device](examples/cuda_shared_memory/cuda_shared_memory.c) using the CUDA API and UMF CUDA memory provider. +UMF's experimental CTL API is showcased in the [CTL example](examples/ctl/ctl.c), +which explores provider and pool statistics, and in the [custom CTL example](examples/ctl/custom_ctl.c), which wires CTL support into a custom memory provider. These examples rely on experimental headers which may change in future releases. + ## Build ### Requirements @@ -128,7 +131,7 @@ List of options provided by CMake: | UMF_BUILD_EXAMPLES | Build UMF examples | ON/OFF | ON | | UMF_BUILD_FUZZTESTS | Build UMF fuzz tests (supported only on Linux with Clang) | ON/OFF | OFF | | UMF_BUILD_GPU_EXAMPLES | Build UMF GPU examples | ON/OFF | OFF | -| UMF_DEVELOPER_MODE | Enable additional developer checks | ON/OFF | OFF | +| UMF_DEVELOPER_MODE | Enable additional developer checks and logs | ON/OFF | OFF | | UMF_FORMAT_CODE_STYLE | Add clang, cmake, and black -format-check and -format-apply targets to make | ON/OFF | OFF | | UMF_TESTS_FAIL_ON_SKIP | Treat skips in tests as fail | ON/OFF | OFF | | UMF_USE_ASAN | Enable AddressSanitizer checks | ON/OFF | OFF | @@ -138,7 +141,6 @@ List of options provided by CMake: | UMF_USE_VALGRIND | Enable Valgrind instrumentation | ON/OFF | OFF | | UMF_USE_COVERAGE | Build with coverage enabled (Linux only) | ON/OFF | OFF | | UMF_LINK_HWLOC_STATICALLY | Link UMF with HWLOC library statically (proxy library will be disabled on Windows+Debug build) | ON/OFF | OFF | -| UMF_DISABLE_HWLOC | Disable features that requires hwloc (OS provider, memory targets, topology discovery) | ON/OFF | OFF | ## Architecture: memory pools and providers diff --git a/RELEASE_STEPS.md b/RELEASE_STEPS.md index 9189d48048..75ca16bae8 100644 --- a/RELEASE_STEPS.md +++ b/RELEASE_STEPS.md @@ -4,8 +4,8 @@ This document contains all the steps required to make a new release of UMF. As a helper, we use in this guide these 2 variables: ```bash - set $VERSION = new full version (e.g., 0.1.0-rc1) # -rc1 included just as an example - set $VER = new major+minor only version (e.g., 0.1) + $VERSION=1.1.0-rc1 # New full version, including optional rc suffix as an example + $VER=1.1 # New major+minor only version ``` **Note:** @@ -19,47 +19,67 @@ will be released for a oneAPI release. 
Once all changes planned for the UMF release are in place, we follow the process (described in more detail below):
1. Checkout the appropriate branch (`main` or "stable" `v$VER.x`).
-2. Make changes for the release.
-3. Create a new tag based on the latest commit - it takes the form
+1. Make sure remotes are up-to-date on your machine (`git remote update`).
+1. Make changes for the release.
+1. Create a new tag based on the latest commit - it should follow the format:
`v<major>.<minor>.<patch>` (e.g., `v0.1.0`).
-4. Push the tag and branch to the upstream.
-5. Create a new GitHub release using the tag created in the previous step.
-6. Update downstream projects to utilize the release tag. If any issues arise
+1. Push the tag and branch to the upstream.
+1. Create a new GitHub release using the tag created in the previous step.
+1. Update dependent/downstream projects to use the new release tag. If any issues arise
from integration, apply any necessary hot fixes to `v$VER.x` branch and go back to step 2
- to create a patch release. This step can also be tested using an `rc` version, potentially followed by another `rc` tag.

## Make a release locally

-Do changes for a release:
-- Start of appropriate branch:
+Prepare changes for the release:
+- Start from the appropriate up-to-date branch:
+ - Fetch remotes
+ - `git remote update`
- For patch release, do it from a stable branch:
- `git checkout v$VER.x` (e.g., checkout `v0.1.x` if this is a `v0.1.1` patch)
- If previously we decided not to create such branch, create it now, based on the appropriate minor or major tag
- For major/minor release start from the `main` branch
-- Add an entry to ChangeLog, remember to change the day of the week in the release date
- - For major and minor (prior 1.0.0) releases mention API and ABI compatibility with the previous release
+- Add a new entry to the `ChangeLog`; remember to change the day of the week in the release date
+ - For major releases mention API and ABI compatibility with the previous releases
- For major and minor releases, update `UMF_VERSION_CURRENT` in `include/umf/base.h` (the API version)
- For changes in ops structures, update corresponding UMF_*_OPS_VERSION_CURRENT
-- For major and minor (prior 1.0.0) releases update ABI version in `.map` and `.def` files
+- For major and minor releases update ABI version in `.map` and `.def` files
- These files are defined for all public libraries (`libumf` and `proxy_lib`, at the moment)
+ - For minor releases, only adding new functions/symbols is acceptable!
+- Once all changes are done, build locally (and/or verify changes on CI), including:
+ - Verify that scanners/linters/checkers passed
+ - Verify that the version is set properly, especially in `.dll` and `.so` files
+- Create/update a VERSION file for GitHub ZIP downloads (users without git):
+ - `echo "$VERSION" > VERSION`
+ - It will always contain "the last released version".
In the logs/CMake build it will introduce itself as `$VERSION-dev`, but only if git is not available
- Commit these changes and tag the release:
+ - `git add VERSION`
- `git commit -a -S -m "$VERSION release"`
- `git tag -a -s -m "Version $VERSION" v$VERSION`
+- Verify that the commit and tag are properly signed:
+ - `git verify-commit `
+ - `git verify-tag v$VERSION`
- For major/minor release:
- If stable branch for this release is required, create it:
- `git checkout -b v$VER.x`
- - For some early versions (like `0.1.0`) we may omit creation of the branch
+ - For some short-lived versions, creation of this branch may be skipped
- For major/minor release, when release is done, add an extra "dev" tag on the `main` branch:
- - `git tag -a -s -m "Development version $VERSION+1" v$VERSION+1-dev`
- - for example, when `v0.1.0` is released, the dev tag would be `v0.2.0-dev`
- - if needed, further in time, an extra dev tag can be introduced, e.g. `v0.2.0-dev1`
+ - `git tag -a -s -m "Development version $VERSION+1 - dev1" v$VERSION+1-dev1`
+ - for example, when `v0.1.0` is released, the dev tag would be `v0.2.0-dev1`
+ - if needed, further in time, an extra dev tag can be introduced, e.g. `v0.2.0-dev2`
- This way, the `main` branch will introduce itself as the next version
+ - The "dev" tag can and should be added right after we merge changes from stable back to main

## Publish changes

As patch releases should be done on the stable branches, pushing tags and branches differs a little.

+**Note:**
+> Before pushing to "upstream" it's preferred to push changes into your own fork.
+> This allows you to verify the branch and tag manually in the GitHub interface, and it will
+> trigger the CI on your fork.
+
For patch release:
- `git push upstream HEAD:v$VER.x v$VERSION` - push branch and tag
@@ -70,13 +90,17 @@ For major/minor release:
- `git checkout v$VER.x`
- `git push upstream HEAD:v$VER.x`
+When the final release is done, it's best to merge the changes from the stable branch back to main.
+This situation can happen if the stable branch was created before the final release (e.g.
+with one of the RC versions). That way all the changes, including the ChangeLog, will land
+on the main branch. After such a merge-back it's advised to add a "dev" tag (described above).
+
## Announce release

To make the release official:
- Go to [GitHub's releases tab](https://github.com/oneapi-src/unified-memory-framework/releases/new):
- Tag version: `v$VERSION`, release title: UMF $VERSION, description: copy entry from ChangeLog and format it with no tabs and no character limit per line
- - Prior to version 1.0.0, check the *Set as a pre-release* tick box.
-- Announce the release, where needed
+- Announce the release in all appropriate channels

## More information

diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt
index 9b46ed6ea0..1c417af67d 100644
--- a/benchmark/CMakeLists.txt
+++ b/benchmark/CMakeLists.txt
@@ -2,22 +2,28 @@
# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -include(FetchContent) -FetchContent_Declare( - googlebenchmark - GIT_REPOSITORY https://github.com/google/benchmark.git - GIT_TAG v1.9.0) - -set(BENCHMARK_ENABLE_GTEST_TESTS - OFF - CACHE BOOL "" FORCE) -set(BENCHMARK_ENABLE_TESTING - OFF - CACHE BOOL "" FORCE) -set(BENCHMARK_ENABLE_INSTALL - OFF - CACHE BOOL "" FORCE) -FetchContent_MakeAvailable(googlebenchmark) +set(GBENCH_VERSION 1.9.0) + +find_package(benchmark ${GBENCH_VERSION} QUIET) + +if(NOT benchmark_FOUND) + include(FetchContent) + FetchContent_Declare( + googlebenchmark + GIT_REPOSITORY https://github.com/google/benchmark.git + GIT_TAG v${GBENCH_VERSION}) + + set(BENCHMARK_ENABLE_GTEST_TESTS + OFF + CACHE BOOL "" FORCE) + set(BENCHMARK_ENABLE_TESTING + OFF + CACHE BOOL "" FORCE) + set(BENCHMARK_ENABLE_INSTALL + OFF + CACHE BOOL "" FORCE) + FetchContent_MakeAvailable(googlebenchmark) +endif() # In MSVC builds, there is no way to determine the actual build type during the # CMake configuration step. Therefore, this message is printed in all MSVC diff --git a/cmake/helpers.cmake b/cmake/helpers.cmake index 8fea0a7431..8ee14bd377 100644 --- a/cmake/helpers.cmake +++ b/cmake/helpers.cmake @@ -11,10 +11,12 @@ include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) # This function establishes version variables based on the git describe output. -# If there's no git available in the system, the version will be set to "0.0.0". -# If git reports only a hash, the version will be set to "0.0.0.git.". -# Otherwise we'll use 3-component version: major.minor.patch, just for CMake's -# sake. A few extra variables will be set for Win dll metadata. +# If there's no git available in the system, it falls back to reading a VERSION +# file from the project root. If neither git nor VERSION file is available, the +# version will be set to "0.0.0". If git reports only a hash, the version will +# be set to "0.0.0.git.". Otherwise we'll use 3-component version: +# major.minor.patch, just for CMake's sake. A few extra variables will be set +# for Win dll metadata. # # Important note: CMake does not support rc or git information. According to # semver rules, 1.5.1-rc1 should be less than 1.5.1, but it seems hard to @@ -78,8 +80,24 @@ function(set_version_variables) OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) if(NOT GIT_VERSION) - # no git or it reported no version. Use default ver: "0.0.0" - return() + # no git or it reported no version. Try fallback to VERSION file + if(EXISTS "${UMF_CMAKE_SOURCE_DIR}/VERSION") + file(READ "${UMF_CMAKE_SOURCE_DIR}/VERSION" FILE_VERSION) + string(STRIP ${FILE_VERSION} FILE_VERSION) + if(FILE_VERSION) + set(GIT_VERSION "v${FILE_VERSION}-dev") + message( + STATUS + "Using version from VERSION file: ${FILE_VERSION}. To get detailed version, use git and fetch tags." + ) + else() + # VERSION file exists but is empty, use default ver: "0.0.0" + return() + endif() + else() + # no git and no VERSION file. Use default ver: "0.0.0" + return() + endif() endif() # v1.5.0 - we're exactly on a tag -> UMF ver: "1.5.0" diff --git a/docs/config/api.rst b/docs/config/api.rst index 8796234280..609ffc147c 100644 --- a/docs/config/api.rst +++ b/docs/config/api.rst @@ -170,6 +170,26 @@ Memtarget .. doxygenfile:: experimental/memtarget.h :sections: define enum typedef func +Memory Properties +========================================== + +Memory properties in UMF describe the characteristics and capabilities of +different memory regions or allocations. 
These properties can include +information such as memory type, allocation size, context and device used for +allocation, and other attributes that are relevant for memory management. + +The Memory Properties API allows users to retrieve and interpret these +attributes for memory managed by UMF, enabling advanced memory management +strategies and improved interoperability with heterogeneous systems. + +.. note:: + The memory properties APIs are experimental and may change in future releases. + +Memory Properties +------------------------------------------ +.. doxygenfile:: experimental/memory_properties.h + :sections: define enum typedef func var + Inter-Process Communication ========================================== diff --git a/docs/config/ctl.rst b/docs/config/ctl.rst new file mode 100644 index 0000000000..abd622aaab --- /dev/null +++ b/docs/config/ctl.rst @@ -0,0 +1,717 @@ +================ +CTL +================ + +UMF's CTL is a mechanism for advanced configuration and control of UMF pools +and providers. It allows programmatic access to provider- or pool-specific +configuration options, statistics and auxiliary APIs. CTL entries can also be +set through environment variables or a configuration file, allowing adjustment +of UMF behavior without modifying the program. + +.. note:: + The CTL API is experimental and may change in future releases. + +Main concepts +============= + +The core concept is a *path*. A path is a string of nodes separated by periods. +You can imagine nodes as directories where the last element is a file that can +be read, written or executed (similar to ``sysfs`` but with periods instead of +slashes). Example path ``umf.logger.level`` controls the log level. You can +access it with:: + + int level; + umf_result_t ret = umfCtlGet("umf.logger.level", &level, sizeof(level)); + +To change the level programmatically use:: + + int level = LOG_WARNING; + umf_result_t ret = umfCtlSet("umf.logger.level", &level, sizeof(level)); + +Accessing pool or provider paths is slightly more involved. For example:: + + size_t alloc_count; + umf_memory_pool_handle_t hPool = createPool(); + umf_result_t ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_count", + &alloc_count, sizeof(alloc_count), hPool); + +The ``umf.pool.by_handle`` prefix selects a pool addressed by its handle. +Every ``{}`` in the path is replaced with an extra argument passed to the CTL +function. Alternative addressing methods are described below. + +Pool / Provider addressing +============================ + +Two addressing schemes are provided: ``by_handle`` and ``by_name``. Each pool +and provider has a unique handle and an optional user-defined name that can be +queried with ``umfMemoryProviderGetName()`` or ``umfMemoryPoolGetName()``. +When using ``by_name`` the name appears in the path, e.g.:: + + umfCtlGet("umf.pool.by_name.myPool.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + +If multiple pools share a name, read operations must disambiguate the target by +appending an index after the name:: + + umfCtlGet("umf.pool.by_name.myPool.0.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + +The number of pools with a given name can be obtained with the ``count`` node. + +Wildcards +=========== + +A ``{}`` in the path acts as a wildcard and is replaced with successive +arguments of ``umfCtlGet``, ``umfCtlSet`` or ``umfCtlExec``. Wildcards can +replace any node, not only handles. 
For example:: + + size_t pool_count; + const char *name = "myPool"; + umfCtlGet("umf.pool.by_name.{}.count", &pool_count, sizeof(pool_count), + name); + for (size_t i = 0; i < pool_count; i++) { + umfCtlGet("umf.pool.by_name.{}.{}.stats.alloc_count", &alloc_count, + sizeof(alloc_count), name, i); + } + +Ensure that the types of wildcard arguments match the expected node types. + +Default addressing +=================== + +``umf.provider.default`` and ``umf.pool.default`` store default values applied +to providers or pools created after the defaults are set. For example:: + + size_t capacity = 16; + umfCtlSet("umf.pool.default.disjoint.params.capacity", &capacity, + sizeof(capacity)); + +Every subsequently created disjoint pool will use ``16`` as its starting +capacity overriding its creation parameters. Defaults are keyed by the +name returned from the provider or pool ``get_name`` callback, so if pool/provider +has custom name it must be addressed explicitly. Defaults may be supplied programmatically +or via environment variable and are saved internally and applied during initialization of a +matching provider or pool. + +Environment variables +===================== + +CTL entries may also be specified in the ``UMF_CONF`` environment variable or +a configuration file specified in the ``UMF_CONF_FILE``. +Multiple entries are separated with semicolons, e.g.:: + + UMF_CONF="umf.logger.output=stdout;umf.logger.level=0" + +CTL options available through environment variables are limited — you can only +target default nodes when addressing pools. This means that configuration +strings can influence values consumed during pool creation but cannot alter +runtime-only parameters. + +============ +CTL nodes +============ + +The CTL hierarchy is rooted at ``umf``. The next component selects one of the +major subsystems: + +* ``umf.logger`` – logging configuration and diagnostics. +* ``umf.provider`` – provider-specific parameters, statistics and commands. +* ``umf.pool`` – pool-specific parameters, statistics and inspection helpers. + +Within each subsystem the path continues with an addressing scheme followed by +the module or leaf of interest. + +Reading below sections +======================= + +Parameter annotations describe the values stored in the node rather than the +pointer types passed to ``umfCtlGet``/``umfCtlSet``/``umfCtlExec``. The +**Access** field indicates whether the node can be read, written, or executed. +The **Defaults / Env** field notes whether the entry can be controlled through +defaults written under ``umf.provider.default.`` or +``umf.pool.default.`` and via ``UMF_CONF``/``UMF_CONF_FILE``. Nodes that do +not accept either configuration source are marked as not supported. + +Logger nodes +================ + +.. py:function:: umf.logger.timestamp(enabled) + + :param enabled: Receives or supplies ``0`` when timestamps are disabled and + ``1`` when they are emitted. + :type enabled: ``int`` + + **Access:** read-write. + **Defaults / Env:** supported. + + Toggle timestamp prefixes in future log records. Logging starts with + timestamps disabled, and the flag affects only messages emitted after the + change. + +.. py:function:: umf.logger.pid(enabled) + + :param enabled: Receives or supplies ``0`` to omit the process identifier and + ``1`` to include it in every message header. + :type enabled: ``int`` + + **Access:** read-write. + **Defaults / Env:** supported. + + Controls whether each log line is annotated with the current process id. + Logging omits the pid by default. 
Setting non-boolean values results in + coercion to zero/non-zero; the change applies to subsequent messages only. + +.. py:function:: umf.logger.level(level) + + :param level: Receives or supplies the minimum severity that will be written. + :type level: ``int`` (``0`` .. ``4``) + + **Access:** read-write. + **Defaults / Env:** supported. + + Sets the filtering threshold for the logger. Records below the configured + level are dropped. Writes that fall outside the enumerated range are + rejected. 0 means debug logs, 1 means info logs, 2 means warnings, 3 means + errors, and 4 means fatal logs. Until an output is selected the logger + ignores the level because logging is disabled. + +.. py:function:: umf.logger.flush_level(level) + + :param level: Receives or supplies the severity at which the logger forces a + flush of the output stream. + :type level: ``int`` (``0`` .. ``4``) + + **Access:** read-write. + **Defaults / Env:** supported. + + Adjusts when buffered log data is synchronously flushed. Writes outside the + valid severity range fail, and lowering the level can incur additional flush + overhead for future messages. With logging disabled no flushing occurs. + +.. py:function:: umf.logger.output(path) + + :param path: Receives the currently selected sink on reads. On writes, pass + ``"stdout"`` or ``"stderr"`` to redirect to standard streams, a + NULL-terminated file path to append to a file, or ``NULL`` to disable + logging altogether. + :type path: ``char *`` when reading, ``const char *`` when writing + + **Access:** read-write. + **Defaults / Env:** supported. + + Controls the destination for log messages. The logger closes any previously + opened file when switching targets. Providing a path longer than 256 bytes or + pointing to a file that cannot be opened causes the write to fail. Special + values ``"stdout"`` and ``"stderr"`` redirect output to the corresponding + streams. Passing ``NULL`` disables logging entirely, which is also the + initial state until a path is provided. + +Provider nodes +================ + +Provider entries are organized beneath ``umf.provider``. Use +``umf.provider.by_handle.{provider}`` with a +:type:`umf_memory_provider_handle_t` argument to reach a specific provider. +Providers can also be addressed by name through ``umf.provider.by_name.{provider}``; +append ``.{index}`` to address specific provider when multiple providers share the same label. +Defaults for future providers reside under ``umf.provider.default.{provider}`` where ``{provider}`` is +a name returned by each provider's ``get_name`` implementation. Providers have their +default names (``OS``, ``FILE``, ``DEVDAX``, ``FIXED``, ``CUDA`` or ``LEVEL_ZERO``), +unless their name was changed during creation, those renamed providers must be addressed explicitly. +Defaults can be written via ``umf.provider.default.`` either programmatically or through +configuration strings. The entries below list only the suffix of each node; +prefix them with the appropriate ``umf.provider`` path. + +Common provider statistics +-------------------------- + +.. py:function:: .stats.allocated_memory(bytes) + + Accessible through both ``umf.provider.by_handle.{provider}`` and + ``umf.provider.by_name.{name}``. Supply the provider handle or name (with an + optional ``.{index}`` suffix for duplicates) as the first wildcard argument. + + :param bytes: Receives the total number of bytes currently outstanding. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. 
+ + Returns the amount of memory the provider has allocated but not yet freed. + The counter updates atomically as the provider serves requests and is not + resettable. + +.. py:function:: .stats.peak_memory(bytes) + + Available via ``umf.provider.by_handle.{provider}`` or + ``umf.provider.by_name.{name}``. Pass the provider selector as the first + wildcard argument. + + :param bytes: Receives the highest observed outstanding allocation size since + the last reset. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the historical maximum allocation footprint of the provider. Combine + with :py:func:`.stats.peak_memory.reset()` to discard stale peaks when + desired. + +.. py:function:: .stats.peak_memory.reset() + + Invoke through ``umf.provider.by_handle.{provider}`` or + ``umf.provider.by_name.{name}`` after supplying the provider selector as the + first wildcard argument. + + **Access:** execute. + **Defaults / Env:** not supported. + + Resets the peak allocation counter to the provider's current outstanding + usage. The operation does not affect other statistics and can be invoked at + any time. + +OS memory provider (``OS``) +--------------------------- + +The OS provider supports the common statistics nodes described above and adds +the following parameter entry. + +.. py:function:: .params.ipc_enabled(enabled) + + :param enabled: Receives ``0`` when inter-process sharing is disabled and a + non-zero value when it is active. + :type enabled: ``int`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Indicates whether the OS memory provider has been initialized with IPC + support. The value is fixed at provider creation time and cannot be modified + afterwards. + +Fixed memory provider (``FIXED``) +----------------------------------- + +The fixed-size allocation provider currently exposes only the common statistics +nodes. + +DevDAX memory provider (``DEVDAX``) +------------------------------------- + +The DevDAX provider exposes the common statistics nodes described earlier. + +File memory provider (``FILE``) +----------------------------------- + +The file-backed provider exposes the common statistics nodes. + +CUDA memory provider (``CUDA``) +----------------------------------- + +The CUDA provider currently exposes only the common statistics nodes. + +Level Zero memory provider (``LEVEL_ZERO``) +----------------------------------------------- + +The Level Zero provider supports the common statistics nodes described above and +adds the following parameter entry. + +.. py:function:: .params.use_import_export_for_IPC(policy) + + :param policy: Receives or supplies ``0`` to use IPC API for memory sharing + and ``1`` to use import/export mechanism for memory sharing. + :type policy: ``int`` + + **Access:** read-write. + **Defaults / Env:** Supported. + + Controls the memory exchange policy for inter-process communication + operations. When set to ``0`` (default), the provider uses the IPC API + for memory sharing between processes. When set to ``1``, the provider uses + the import/export mechanism for memory sharing. This option is supported + only on Windows with the Level Zero provider, where the default IPC mechanism + does not work. Note that enabling import/export adds overhead during + allocation and deallocation for all allocations on the current provider. + +Pool nodes +========== + +Pool entries mirror the provider layout. 
``umf.pool.by_handle.{pool}`` accepts a +:type:`umf_memory_pool_handle_t`, while ``umf.pool.by_name.{pool}`` addresses +pools by name with an optional ``.{index}`` suffix when names are reused. +Defaults for future pools reside under ``umf.pool.default.{pool}`` and track the +name returned by each pool's ``get_name`` implementation. Pools that keep their +default names (``disjoint``, ``scalable`` and ``jemalloc``) continue to match +those entries, while renamed pools must be addressed explicitly. Defaults can be +written via ``umf.pool.default.`` either programmatically or through +configuration strings. The entries below list only the suffix of each node; +prefix them with the appropriate ``umf.pool`` path. + +Common pool statistics +-------------------------- + +.. py:function:: .stats.alloc_count(count) + + :param count: Receives the number of live allocations tracked by the pool. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Counts the allocations currently outstanding according to the pool's public + allocation API. The value increments on successful allocations and + decrements when memory is released. + +Disjoint pool (``disjoint``) +-------------------------------- + +.. py:function:: .params.slab_min_size(bytes) + + :param bytes: Receives or supplies the minimum slab size requested from the + provider. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only available through defaults) + **Defaults / Env:** supported. + + Governs how much memory the pool grabs in each slab. Lower values reduce + per-allocation slack while higher values amortize provider overhead. Writes + are accepted only before the pool completes its ``post_initialize`` phase. + +.. py:function:: .params.max_poolable_size(bytes) + + :param bytes: Receives or supplies the largest allocation size that is still + cached by the pool. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only available through defaults) + **Defaults / Env:** supported. + + Sets the cut-off for pooling allocations. Requests larger than this value are + delegated directly to the provider. Updates must occur before + ``post_initialize`` completes. + +.. py:function:: .params.capacity(count) + + :param count: Receives or supplies the maximum number of slabs each bucket + may retain. + :type count: ``size_t`` + + **Access:** read-write. (write is only available through defaults) + **Defaults / Env:** supported. + + Caps the pool's cached slabs per bucket to limit memory retention. Shrinking + the capacity may cause future frees to return slabs to the provider. Writes + are rejected after ``post_initialize``. + +.. py:function:: .params.min_bucket_size(bytes) + + :param bytes: Receives or supplies the minimal allocation size a bucket may + serve. + :type bytes: ``size_t`` + + **Access:** read-write. (write is only available through defaults) + **Defaults / Env:** supported. + + Controls the smallest chunk size kept in the pool, which in turn affects the + number of buckets. Writes are validated for size correctness and disallowed + after ``post_initialize``. + +.. py:function:: .params.pool_trace(level) + + :param level: Receives or supplies the tracing level for the pool. + :type level: ``int`` (``0`` disables tracing) + + **Access:** read-write. (write is only available through defaults) + **Defaults / Env:** supported. + + Controls the disjoint pool's tracing features. ``0`` disables tracing. 
+ ``1`` records slab usage totals exposed through the ``.stats.curr_slabs_*`` + and ``.stats.max_slabs_*`` nodes. ``2`` additionally tracks allocation and + free counters and prints a usage summary when the pool is destroyed. Values + greater than ``2`` also emit debug logs for every allocation and free. + Tracing must be activated before ``post_initialize``; attempting to change it + later fails with ``UMF_RESULT_ERROR_NOT_SUPPORTED``. + +.. py:function:: .stats.used_memory(bytes) + + Available under ``umf.pool.by_handle.disjoint`` and + ``umf.pool.by_name.disjoint``. Provide the pool selector as the first wildcard + argument. + + :param bytes: Receives the amount of memory that is presently allocated by + the pool's clients. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the memory currently in use across all slabs by active allocations. + Available even when ``pool_trace`` is disabled. + +.. py:function:: .stats.reserved_memory(bytes) + + :param bytes: Receives the total number of bytes reserved in slabs that the + pool owns. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Returns the total slab capacity reserved by the pool, including cached free + space. Available even when ``pool_trace`` is disabled. + +.. py:function:: .stats.alloc_num(count) + + :param count: Receives the number of allocations the pool has issued. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts every + allocation handed out by the pool since it was created. + +.. py:function:: .stats.alloc_pool_num(count) + + :param count: Receives the number of allocations served directly from cached + slabs. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts + allocations served from cached slabs without visiting the provider. + +.. py:function:: .stats.free_num(count) + + :param count: Receives the total number of frees processed by the pool. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Tracks the + number of frees observed by the pool since its creation. + +.. py:function:: .stats.curr_slabs_in_use(count) + + :param count: Receives the current number of slabs actively serving + allocations. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the number of + slabs that currently have live allocations. + +.. py:function:: .stats.curr_slabs_in_pool(count) + + :param count: Receives how many slabs are cached and ready for reuse. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Reports the slabs + retained in the pool for future reuse. + +.. py:function:: .stats.max_slabs_in_use(count) + + :param count: Receives the historical maximum of simultaneously used slabs. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Provides the peak + number of slabs that were in use at the same time. + +.. 
py:function:: .stats.max_slabs_in_pool(count) + + :param count: Receives the largest number of slabs retained in the cache. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the highest + number of slabs ever retained in the cache simultaneously. + +.. py:function:: .buckets.count(count) + + :param count: Receives the number of distinct bucket sizes. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Returns the total number of buckets in the pool. + +.. py:function:: .buckets.{id}.size(bytes) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param bytes: Receives the allocation size that the bucket serves. + :type bytes: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Reports the allocation size serviced by the selected bucket. This value is + available even when tracing is disabled. + +.. py:function:: .buckets.{id}.stats.alloc_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of allocations performed by this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts every + allocation that passed through the specified bucket. + +.. py:function:: .buckets.{id}.stats.alloc_pool_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of allocations satisfied from cached slabs + in this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Counts how many + allocations were served entirely from the bucket's cached slabs. + +.. py:function:: .buckets.{id}.stats.free_num(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of frees recorded for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` set to ``2`` or higher. Tracks the + number of frees observed for the bucket. + +.. py:function:: .buckets.{id}.stats.curr_slabs_in_use(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives how many slabs for this bucket currently serve + allocations. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the current + slab utilization for the bucket. + +.. py:function:: .buckets.{id}.stats.curr_slabs_in_pool(count) + + Available through ``umf.pool.by_handle.disjoint`` and + ``umf.pool.by_name.disjoint``. Provide the pool selector and bucket index as + the first two wildcard arguments. ``{id}`` denotes a bucket index of type + ``size_t``. Valid indices range from ``0`` to ``.buckets.count - 1``. + + :param count: Receives the number of slabs cached and immediately available + for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. 
+ + Requires tracing with ``pool_trace`` of at least ``1``. Reports cached slabs + that the bucket can reuse without a provider call. + +.. py:function:: .buckets.{id}.stats.max_slabs_in_use(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the peak number of slabs in use for this bucket. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Provides the + historical maximum of slabs simultaneously in use for the bucket. + +.. py:function:: .buckets.{id}.stats.max_slabs_in_pool(count) + + ``{id}`` denotes a bucket index of type ``size_t``. Valid indices range from + ``0`` to ``.buckets.count - 1``. + + :param count: Receives the largest number of slabs retained in the bucket's + cache. + :type count: ``size_t`` + + **Access:** read-only. + **Defaults / Env:** not supported. + + Requires tracing with ``pool_trace`` of at least ``1``. Returns the maximum + number of slabs cached for later use by the bucket. + + +Scalable pool (``scalable``) +------------------------------ + +The scalable pool currently exposes only the common statistics nodes. + +Jemalloc pool (``jemalloc``) +-------------------------------- + +The jemalloc-backed pool currently exposes only the common statistics nodes. + +================================================ +Adding CTL support to custom providers and pools +================================================ + +The :file:`examples/ctl/custom_ctl.c` source demonstrates how a minimal +provider can expose configuration entries, statistics and runnables through the +CTL API. To add similar support to your own provider or pool you must implement +an ``ext_ctl`` callback – parse incoming CTL paths and handle +``CTL_QUERY_READ``, ``CTL_QUERY_WRITE`` and ``CTL_QUERY_RUNNABLE`` requests. +The callback receives a ``umf_ctl_query_source_t`` indicating whether the +query came from the application or a configuration source. Programmatic +calls pass typed binary data, while configuration sources deliver strings +that must be parsed. Wildcards (``{}``) may appear in paths and are supplied +as additional arguments. + +During initialization UMF will execute ``post_initialize`` on the callback after +applying any queued defaults, allowing the provider or pool to finalize its +state before it is used by the application. The example converts wildcarded +paths into ``printf``-style format strings with ``%s`` and uses ``vsnprintf`` to +resolve the extra arguments. It also shows a helper that accepts integers from +either source, printing the final values from ``post_initialize``. + +Building and running the example: + +.. code-block:: bash + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl + +An optional modulus can be supplied via the environment: + +.. code-block:: bash + + UMF_CONF="umf.provider.default.ctl.m=10" ./build/examples/umf_example_ctl diff --git a/docs/config/examples.rst b/docs/config/examples.rst index a09638da92..28b6ab02e7 100644 --- a/docs/config/examples.rst +++ b/docs/config/examples.rst @@ -147,6 +147,59 @@ in the UMF repository. TODO +CTL example +============================================================================== + +.. note:: + The CTL API is experimental and may change in future releases. + +You can find the full example code in the `examples/ctl/ctl.c`_ file in the +UMF repository. 
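+
+At its core, the example applies one pattern over and over: build a CTL path,
+optionally containing ``{}`` wildcards, and read the value into a local
+variable. A minimal sketch of that pattern (assuming a pool handle ``hPool``
+already created with ``umfPoolCreate``) looks like::
+
+    size_t alloc_count = 0;
+    /* {} is replaced by the extra argument - here the pool handle */
+    umf_result_t ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_count",
+                                 &alloc_count, sizeof(alloc_count), hPool);
+    if (ret == UMF_RESULT_SUCCESS) {
+        printf("live allocations: %zu\n", alloc_count);
+    }
+
+The full example wraps this pattern in small helper functions and adds error
+reporting around each query.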
+ +The sample configures an OS memory provider and a disjoint pool, reuses the +provider's canonical ``OS`` selector obtained at runtime, assigns a custom pool +name, and then mixes ``by_handle`` and ``by_name`` selectors to explore CTL +statistics. Wildcard nodes are used to choose provider counters, build a +four-segment ``{}.{}`` chain for the named pool, reset the peak tracker, and +drill into per-bucket disjoint pool telemetry. The program prints hints on +``stderr`` explaining which tracing level is necessary when a statistic is +unavailable. + +Build and run the example with:: + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl_statistics + +Detailed disjoint pool counters are disabled unless tracing is configured +before pool creation. Enable them through the environment:: + + UMF_CONF="umf.pool.default.disjoint.params.pool_trace=2" ./build/examples/umf_example_ctl_statistics + +Tracing level ``1`` enables slab usage counters, level ``2`` adds allocation +and free statistics, and level ``3`` additionally emits verbose log messages +from the pool implementation. + +Custom CTL example +============================================================================== + +You can find the full example code in the `examples/ctl/custom_ctl.c`_ file in +the UMF repository. The program implements a minimal memory provider with CTL +hooks that accept configuration values, execute runnables, and expose provider +state through the experimental API. It highlights converting wildcard segments +to ``printf``-style format strings and reading integers supplied via +configuration defaults. + +Build and run the example with:: + + cmake -B build + cmake --build build + ./build/examples/umf_example_ctl + +Optionally supply a modulus via configuration defaults:: + + UMF_CONF="umf.provider.default.ctl.m=10" ./build/examples/umf_example_ctl + IPC example with Level Zero Memory Provider ============================================================================== The full code of the example is in the `examples/ipc_level_zero/ipc_level_zero.c`_ file in the UMF repository. @@ -231,6 +284,8 @@ the :any:`umfCloseIPCHandle` function is called. .. _examples/cuda_shared_memory/cuda_shared_memory.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/cuda_shared_memory/cuda_shared_memory.c .. _examples/ipc_level_zero/ipc_level_zero.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ipc_level_zero/ipc_level_zero.c .. _examples/custom_file_provider/custom_file_provider.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/custom_file_provider/custom_file_provider.c +.. _examples/ctl/ctl.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/ctl.c +.. _examples/ctl/custom_ctl.c: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/ctl/custom_ctl.c .. _examples/memspace: https://github.com/oneapi-src/unified-memory-framework/blob/main/examples/memspace/ .. _README: https://github.com/oneapi-src/unified-memory-framework/blob/main/README.md#memory-pool-managers .. _umf/ipc.h: https://github.com/oneapi-src/unified-memory-framework/blob/main/include/umf/ipc.h diff --git a/docs/config/index.rst b/docs/config/index.rst index 3bd20828ff..4447dcd74b 100644 --- a/docs/config/index.rst +++ b/docs/config/index.rst @@ -1,4 +1,4 @@ -.. Copyright 2023 Intel Corporation +.. 
Copyright 2023-2025 Intel Corporation Intel Unified Memory Framework documentation Intel Unified Memory Framework documentation @@ -10,4 +10,6 @@ Intel Unified Memory Framework documentation introduction.rst examples.rst api.rst + ctl.rst glossary.rst + diff --git a/docs/config/spelling_exceptions.txt b/docs/config/spelling_exceptions.txt index d4e40a3ec8..f329b7c6f3 100644 --- a/docs/config/spelling_exceptions.txt +++ b/docs/config/spelling_exceptions.txt @@ -3,22 +3,24 @@ allocatable allocator allocators calloc -CXL copyable +CUcontext +CUdevice customizable +CXL daxX -deallocation deallocating +deallocation deallocations -Devdax dev +Devdax Globals +highPtr hMemtarget hPool hProvider -highPtr -io interprocess +io ipc jemalloc lowPtr @@ -35,6 +37,7 @@ Memtarget memtarget memtargets middleware +minBytesToKeep multithreading Nodemask nodemask @@ -47,21 +50,29 @@ partList pid poolable preallocated -providerIpcData +programmatically +propertyId providential +providerIpcData ptr realloc +runnables Scalable scalable stdout Tiering tiering topologies +uint +uintptr umf umfGetIPCHandle +umfGetMemoryPropertySize umfMemoryProviderAlloc umfMemoryProviderGetLastNativeError umfMemoryProviderOpenIPCHandle +umfMemspaceMemtargetAdd +umfMemspaceUserFilter umfOsMemoryProviderParamsDestroy umfPool umfPoolCalloc @@ -71,4 +82,8 @@ umfPoolMallocUsableSize umfPoolRealloc umfMemspaceUserFilter umfMemspaceMemtargetAdd -unfreed \ No newline at end of file +unfreed +usm +wildcarded +zA +ze diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 91f47901cb..c79e950ed2 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -273,6 +273,42 @@ if(LINUX) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() + set(EXAMPLE_NAME umf_example_ctl) + + add_umf_executable( + NAME ${EXAMPLE_NAME} + SRCS ctl/custom_ctl.c + LIBS umf ${UMF_HWLOC_NAME}) + + target_include_directories( + ${EXAMPLE_NAME} PRIVATE ${UMF_CMAKE_SOURCE_DIR}/src/utils + ${UMF_CMAKE_SOURCE_DIR}/include) + + target_link_directories(${EXAMPLE_NAME} PRIVATE ${LIBHWLOC_LIBRARY_DIRS}) + + add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + + set(EXAMPLE_NAME umf_example_ctl_statistics) + + add_umf_executable( + NAME ${EXAMPLE_NAME} + SRCS ctl/ctl.c + LIBS umf ${UMF_HWLOC_NAME}) + + target_include_directories( + ${EXAMPLE_NAME} PRIVATE ${UMF_CMAKE_SOURCE_DIR}/src/utils + ${UMF_CMAKE_SOURCE_DIR}/include) + + target_link_directories(${EXAMPLE_NAME} PRIVATE ${LIBHWLOC_LIBRARY_DIRS}) + + add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + if(UMF_POOL_JEMALLOC_ENABLED) set(EXAMPLE_NAME umf_example_dram_and_fsdax) diff --git a/examples/README.md b/examples/README.md index 70d114a63a..4f16e19067 100644 --- a/examples/README.md +++ b/examples/README.md @@ -66,3 +66,31 @@ processes: a producer and a consumer that communicate in the following way - Producer puts the IPC handle - Consumer shuts down - Producer shuts down + +## CTL example + +> **Note**: The CTL API is experimental and may change in future releases. + +This example configures an OS memory provider and disjoint pool, then queries +statistics through CTL using both ``by_handle`` and ``by_name`` selectors. It +demonstrates wildcard nodes to mix selectors, reset peak counters, and read +disjoint-pool bucket telemetry. 
Run it with: + + ./umf_example_ctl_statistics + +Tracing for detailed disjoint pool counters can be enabled through: + + UMF_CONF="umf.pool.default.disjoint.params.pool_trace=2" ./umf_example_ctl_statistics + +## Custom CTL example + +> **Note**: The CTL API is experimental and may change in future releases. + +This example demonstrates how to add CTL support to a custom memory +provider. It sets variables ``a`` and ``b`` through CTL, plus it allows +for the modulus ``m`` to be loaded from the environment or a configuration file. +Addition and subtraction operations return results modulo ``m`` and the +result ``c`` can be retrieved using the CTL API. For example, to set the +modulus through an environment variable run: + + UMF_CONF="umf.provider.default.ctl.m=10" ./umf_example_ctl diff --git a/examples/ctl/CMakeLists.txt b/examples/ctl/CMakeLists.txt new file mode 100644 index 0000000000..26fee9e83d --- /dev/null +++ b/examples/ctl/CMakeLists.txt @@ -0,0 +1,81 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required(VERSION 3.14.0 FATAL_ERROR) +project(umf_example_ctl LANGUAGES C) +enable_testing() + +set(UMF_EXAMPLE_DIR "${CMAKE_SOURCE_DIR}/..") +list(APPEND CMAKE_MODULE_PATH "${UMF_EXAMPLE_DIR}/cmake") +message(STATUS "CMAKE_MODULE_PATH=${CMAKE_" "MODULE_PATH}") + +find_package(PkgConfig) +pkg_check_modules(LIBUMF libumf) +if(NOT LIBUMF_FOUND) + find_package(LIBUMF REQUIRED libumf) +endif() + +pkg_check_modules(LIBHWLOC hwloc >= 2.3.0) +if(NOT LIBHWLOC_FOUND) + find_package(LIBHWLOC 2.3.0 REQUIRED hwloc) +endif() + +# build the example +set(EXAMPLE_NAME umf_example_ctl) +add_executable(${EXAMPLE_NAME} custom_ctl.c) +target_include_directories(${EXAMPLE_NAME} PRIVATE ${LIBUMF_INCLUDE_DIRS}) +target_link_directories( + ${ + EXAMPLE_NAME} + PRIVATE + ${ + LIBHWLOC_LIBRARY_DIRS}) +target_link_libraries(${EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc) + +add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + +set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone") + +if(LINUX) + # set LD_LIBRARY_PATH + set_property( + TEST ${EXAMPLE_NAME} + PROPERTY ENVIRONMENT_MODIFICATION + "LD_LIBRARY_PATH=path_list_append:" + "${LIBUMF_LIBRARY_DIRS};" + "LD_LIBRARY_PATH=path_list_append:${" + "LIBHWLOC_LIBRARY_DIRS}") +endif() + +set(EXAMPLE_NAME umf_example_ctl_statistics) +add_executable(${EXAMPLE_NAME} ctl.c) +target_include_directories(${EXAMPLE_NAME} PRIVATE ${LIBUMF_INCLUDE_DIRS}) +target_link_directories( + ${ + EXAMPLE_NAME} + PRIVATE + ${ + LIBHWLOC_LIBRARY_DIRS}) +target_link_libraries(${EXAMPLE_NAME} PRIVATE ${LIBUMF_LIBRARIES} hwloc) + +add_test( + NAME ${EXAMPLE_NAME} + COMMAND ${EXAMPLE_NAME} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + +set_tests_properties(${EXAMPLE_NAME} PROPERTIES LABELS "example-standalone") + +if(LINUX) + # set LD_LIBRARY_PATH + set_property( + TEST ${EXAMPLE_NAME} + PROPERTY ENVIRONMENT_MODIFICATION + "LD_LIBRARY_PATH=path_list_append:" + "${LIBUMF_LIBRARY_DIRS};" + "LD_LIBRARY_PATH=path_list_append:${" + "LIBHWLOC_LIBRARY_DIRS}") +endif() diff --git a/examples/ctl/ctl.c b/examples/ctl/ctl.c new file mode 100644 index 0000000000..1edba4b5e4 --- /dev/null +++ b/examples/ctl/ctl.c @@ -0,0 +1,288 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include +#include + +// This example relies on the experimental CTL API, which may change without +// notice. +#include + +#include +#include +#include +#include +#include + +static void print_provider_stats(const char *stage, + umf_memory_provider_handle_t provider, + const char *provider_name) { + size_t outstanding = 0; + size_t peak = 0; + + umf_result_t res = + umfCtlGet("umf.provider.by_handle.{}.stats.allocated_memory", + &outstanding, sizeof(outstanding), provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, + "%s: failed to read provider allocated memory (error %d)\n", + stage, (int)res); + return; + } + + /* you can also pass any nodes through va args by using {} */ + res = umfCtlGet("umf.provider.by_handle.{}.stats.{}", &peak, sizeof(peak), + provider, "peak_memory"); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "%s: failed to read provider peak memory (error %d)\n", + stage, (int)res); + return; + } + + printf("%s (%s): outstanding=%zu bytes, peak=%zu bytes\n", stage, + provider_name, outstanding, peak); +} + +/* Provide tutorial guidance when disjoint pool counters require tracing. */ +static bool report_pool_stat_failure(const char *label, + int required_trace_level, + umf_result_t res) { + if (res == UMF_RESULT_SUCCESS) { + return false; + } + + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + int hint_level = required_trace_level; + const char *reason = "verbose logging"; + + if (hint_level <= 1) { + hint_level = 1; + reason = "slab statistics"; + } else if (hint_level == 2) { + reason = "allocation counters"; + } else { + hint_level = 3; + } + + fprintf( + stderr, + "Cannot read %s because disjoint pool tracing level %d is " + "required. " + "This example do not set pool_trace so you can enable it through " + "env variable.\n" + "Set UMF_CONF=\"umf.pool.default.disjoint.params.pool_trace=%d\" " + "before running to enable %s%s.\n", + label, hint_level, hint_level, reason, + hint_level < 3 ? " (level 3 also enables verbose logging)" : ""); + } else { + fprintf(stderr, "Failed to read %s (error %d)\n", label, (int)res); + } + + return true; +} + +static void print_pool_stat_by_handle(const char *label, + umf_memory_pool_handle_t pool, + const char *stat_node, + int required_trace_level) { + size_t value = 0; + /* Surround the {} placeholder with literal segments so CTL resolves + * whichever pool handle the allocator hands back. */ + umf_result_t res = umfCtlGet("umf.pool.by_handle.{}.stats.{}", &value, + sizeof(value), pool, stat_node); + if (report_pool_stat_failure(label, required_trace_level, res)) { + return; + } + + printf("%s: %zu\n", label, value); +} + +static void print_pool_bucket_stat_by_name(const char *label, + const char *pool_name, + size_t bucket_index, + const char *stat_node, + int required_trace_level) { + size_t value = 0; + /* Anchor the pool label with by_name while {} wildcards cover the ordinal + * and statistic nodes to highlight mixed selectors. 
*/ + umf_result_t res = + umfCtlGet("umf.pool.by_name.{}.buckets.{}.stats.{}", &value, + sizeof(value), pool_name, bucket_index, stat_node); + + if (report_pool_stat_failure(label, required_trace_level, res)) { + return; + } + + printf("%s: %zu\n", label, value); +} + +#define pool_name "ctl_stats_pool" +int main(void) { + const size_t provider_allocation_size = 64 * 1024; + const size_t pool_allocation_size = 4096; + const char *provider_name = NULL; + void *pool_memory = NULL; + umf_result_t res = UMF_RESULT_SUCCESS; + + const umf_memory_provider_ops_t *provider_ops = umfOsMemoryProviderOps(); + umf_os_memory_provider_params_handle_t os_params = NULL; + umf_memory_provider_handle_t provider = NULL; + umf_disjoint_pool_params_handle_t disjoint_params = NULL; + umf_memory_pool_handle_t pool = NULL; + + res = umfOsMemoryProviderParamsCreate(&os_params); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, + "Failed to create OS memory provider params (error %d)\n", + (int)res); + return -1; + } + + res = umfMemoryProviderCreate(provider_ops, os_params, &provider); + umfOsMemoryProviderParamsDestroy(os_params); + os_params = NULL; + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create OS memory provider (error %d)\n", + (int)res); + return -1; + } + + umfMemoryProviderGetName(provider, &provider_name); + + print_provider_stats("Provider stats before allocation", provider, + provider_name); + + void *provider_memory = NULL; + res = umfMemoryProviderAlloc(provider, provider_allocation_size, 0, + &provider_memory); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Provider allocation failed (error %d)\n", (int)res); + goto cleanup; + } + + print_provider_stats("Provider stats after allocation", provider, + provider_name); + + res = umfMemoryProviderFree(provider, provider_memory, + provider_allocation_size); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Provider free failed (error %d)\n", (int)res); + goto cleanup; + } + provider_memory = NULL; + + print_provider_stats("Provider stats after free", provider, provider_name); + + res = umfCtlExec("umf.provider.by_handle.{}.stats.peak_memory.reset", NULL, + 0, provider); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to reset provider peak memory (error %d)\n", + (int)res); + goto cleanup; + } + + print_provider_stats("Provider stats after peak reset", provider, + provider_name); + + const umf_memory_pool_ops_t *pool_ops = umfDisjointPoolOps(); + res = umfDisjointPoolParamsCreate(&disjoint_params); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create disjoint pool params (error %d)\n", + (int)res); + goto cleanup; + } + + /* set name of the pool so we can easily ref it by using name */ + res = umfDisjointPoolParamsSetName(disjoint_params, pool_name); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to name disjoint pool (error %d)\n", (int)res); + goto cleanup; + } + + res = umfPoolCreate(pool_ops, provider, disjoint_params, 0, &pool); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to create disjoint pool (error %d)\n", + (int)res); + goto cleanup; + } + + pool_memory = umfPoolMalloc(pool, pool_allocation_size); + if (pool_memory == NULL) { + fprintf(stderr, "Disjoint pool allocation failed\n"); + goto cleanup; + } + + print_pool_stat_by_handle("Disjoint pool used_memory", pool, "used_memory", + 0); + print_pool_stat_by_handle("Disjoint pool curr_slabs_in_use", pool, + "curr_slabs_in_use", 1); + print_pool_stat_by_handle("Disjoint pool alloc_num", pool, 
"alloc_num", 2); + + size_t pool_name_count = 0; + + res = umfCtlGet("umf.pool.by_name.{}.count", &pool_name_count, + sizeof(pool_name_count), pool_name); + + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to read pool count (error %d)\n", (int)res); + goto cleanup; + } + printf("There is %zu pools with name %s\n", pool_name_count, pool_name); + size_t bucket_count = 0; + + // you can put pool_name directly in ctl string without {} if you want + res = umfCtlGet("umf.pool.by_name." pool_name ".buckets.count", + &bucket_count, sizeof(bucket_count)); + + for (size_t bucket = 0; bucket < bucket_count; bucket++) { + size_t bucket_size = 0; + // after pool name you can add pool index if there are multiple pools with + // the same name, if there is only one it is safe to omit it + // but still you can provide it + res = umfCtlGet("umf.pool.by_name.{}.0.buckets.{}.size", &bucket_size, + sizeof(bucket_size), pool_name, bucket); + if (res != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to read bucket size (error %d)\n", + (int)res); + goto cleanup; + } + + if (bucket_size == pool_allocation_size) { + printf("Disjoint pool bucket[%zu] size: %zu bytes\n", bucket, + bucket_size); + print_pool_bucket_stat_by_name("Disjoint pool bucket alloc_num", + pool_name, bucket, "alloc_num", 2); + print_pool_bucket_stat_by_name( + "Disjoint pool bucket curr_slabs_in_use", pool_name, bucket, + "curr_slabs_in_use", 1); + goto cleanup; + } + } + +cleanup: + if (pool_memory) { + umfFree(pool_memory); + } + + if (pool) { + umfPoolDestroy(pool); + } + if (disjoint_params) { + umfDisjointPoolParamsDestroy(disjoint_params); + } + if (provider_memory) { + umfMemoryProviderFree(provider, provider_memory, + provider_allocation_size); + } + if (provider) { + umfMemoryProviderDestroy(provider); + } + + return 0; +} diff --git a/examples/ctl/custom_ctl.c b/examples/ctl/custom_ctl.c new file mode 100644 index 0000000000..83f5509940 --- /dev/null +++ b/examples/ctl/custom_ctl.c @@ -0,0 +1,320 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#define _GNU_SOURCE 1 +#include +#include +#include +#include + +// This example relies on the experimental CTL API, which may change without +// notice. 
+#include + +#include +#include + +#include +#include + +// Minimal memory provider demonstrating CTL integration + +// Provider state exposed via CTL +typedef struct ctl_provider_t { + int a, b, c, m; +} ctl_provider_t; + +static umf_result_t ctl_init(const void *params, void **provider) { + (void)params; + ctl_provider_t *p = calloc(1, sizeof(*p)); + if (!p) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + *provider = p; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_finalize(void *provider) { + free(provider); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_alloc(void *provider, size_t size, size_t alignment, + void **ptr) { + (void)provider; + (void)alignment; + *ptr = malloc(size); + if (*ptr == NULL) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_free(void *provider, void *ptr, size_t size) { + (void)provider; + (void)size; + free(ptr); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_last_native_error(void *provider, + const char **ppMessage, + int32_t *pError) { + (void)provider; + if (ppMessage) { + *ppMessage = NULL; + } + if (pError) { + *pError = 0; + } + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_recommended_page_size(void *provider, size_t size, + size_t *pageSize) { + (void)provider; + (void)size; + *pageSize = 4096; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_min_page_size(void *provider, const void *ptr, + size_t *pageSize) { + (void)provider; + (void)ptr; + *pageSize = 4096; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ctl_get_name(void *provider, const char **name) { + (void)provider; + if (name) { + *name = "ctl"; + } + return UMF_RESULT_SUCCESS; +} + +// Wildcards (`{}`) become extra args; convert them to `%s` for `vsnprintf`. +static void replace_braces_with_percent_s(const char *name, char *fmt, + size_t fmt_size) { + size_t i = 0, j = 0; + while (name[i] != '\0' && j < fmt_size - 1) { + if (name[i] == '{' && name[i + 1] == '}' && j < fmt_size - 2) { + fmt[j++] = '%'; + fmt[j++] = 's'; + i += 2; + } else { + fmt[j++] = name[i++]; + } + } + fmt[j] = '\0'; +} + +// Parse an integer from programmatic (binary) or configuration (string) input. 
+static umf_result_t parse_int(void *arg, size_t size, + umf_ctl_query_source_t source, int *out) { + if (!arg || !out) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (source == CTL_QUERY_PROGRAMMATIC) { + if (size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *out = *(int *)arg; + return UMF_RESULT_SUCCESS; + } else if (source == CTL_QUERY_CONFIG_INPUT) { + char *buf = malloc(size + 1); + if (!buf) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(buf, arg, size); + buf[size] = '\0'; + *out = (int)strtol(buf, NULL, 10); + free(buf); + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + +// CTL callback interpreting provider-specific paths and actions +static umf_result_t ctl_ctl(void *provider, umf_ctl_query_source_t source, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, va_list args) { + ctl_provider_t *p = (ctl_provider_t *)provider; + + char fmt[128]; + char formatted[128]; + replace_braces_with_percent_s(name, fmt, sizeof(fmt)); + va_list args_copy; + va_copy(args_copy, args); + vsnprintf(formatted, sizeof(formatted), fmt, args_copy); + va_end(args_copy); + + if (queryType == CTL_QUERY_RUNNABLE && + strcmp(formatted, "post_initialize") == 0) { + // Called once defaults have been loaded + printf("post_initialize: a=%d b=%d c=%d m=%d\n", p->a, p->b, p->c, + p->m); + return UMF_RESULT_SUCCESS; + } + + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "a") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->a = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "b") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->b = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_WRITE && strcmp(formatted, "m") == 0) { + int val = 0; + umf_result_t ret = parse_int(arg, size, source, &val); + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + p->m = val; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_RUNNABLE && strcmp(formatted, "addition") == 0) { + if (p->m) { + p->c = (p->a + p->b) % p->m; + } else { + p->c = p->a + p->b; + } + if (arg && size == sizeof(int)) { + *(int *)arg = p->c; + } + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_RUNNABLE && + strcmp(formatted, "subtraction") == 0) { + if (p->m) { + p->c = (p->a - p->b) % p->m; + } else { + p->c = p->a - p->b; + } + if (arg && size == sizeof(int)) { + *(int *)arg = p->c; + } + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_READ && strcmp(formatted, "c") == 0) { + if (arg == NULL || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = p->c; + return UMF_RESULT_SUCCESS; + } + if (queryType == CTL_QUERY_READ && strcmp(formatted, "m") == 0) { + if (arg == NULL || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = p->m; + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_CTL_PATH; +} + +static umf_memory_provider_ops_t ctl_ops = { + .version = UMF_PROVIDER_OPS_VERSION_CURRENT, + .initialize = ctl_init, + .finalize = ctl_finalize, + .alloc = ctl_alloc, + .free = ctl_free, + .get_last_native_error = ctl_get_last_native_error, + .get_recommended_page_size = ctl_get_recommended_page_size, + .get_min_page_size = ctl_get_min_page_size, + .get_name = ctl_get_name, + .ext_ctl = ctl_ctl, // 
register CTL handler
+};
+
+int main(void) {
+    umf_result_t res;
+    umf_memory_provider_handle_t provider;
+
+    // Create provider instance and wire in CTL callbacks
+    res = umfMemoryProviderCreate(&ctl_ops, NULL, &provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to create a memory provider!\n");
+        return -1;
+    }
+    printf("ctl provider created at %p\n", (void *)provider);
+    // Defaults are now applied and `post_initialize` has run
+
+    int a = 10;
+    int b = 7;
+    // Set variables via CTL; `{}` is replaced by the provider handle
+    res = umfCtlSet("umf.provider.by_handle.{}.a", &a, sizeof(a), provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to set a!\n");
+        goto out;
+    }
+    res = umfCtlSet("umf.provider.by_handle.{}.b", &b, sizeof(b), provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to set b!\n");
+        goto out;
+    }
+    int m = 0;
+    // Read the optional modulus set via config or environment; {} can stand
+    // in for any node of the path
+    res =
+        umfCtlGet("umf.provider.by_handle.{}.{}", &m, sizeof(m), provider, "m");
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to get m!\n");
+        goto out;
+    }
+    printf("using modulus m=%d\n", m);
+
+    int result = 0;
+
+    // Execute addition and fetch the result
+    res = umfCtlExec("umf.provider.by_handle.{}.addition", NULL, 0, provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to execute addition!\n");
+        goto out;
+    }
+    res = umfCtlGet("umf.provider.by_handle.{}.c", &result, sizeof(result),
+                    provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to get c!\n");
+        goto out;
+    }
+    printf("addition result: %d\n", result);
+
+    // Execute subtraction and fetch the result
+    res =
+        umfCtlExec("umf.provider.by_handle.{}.subtraction", NULL, 0, provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to execute subtraction!\n");
+        goto out;
+    }
+    res = umfCtlGet("umf.provider.by_handle.{}.c", &result, sizeof(result),
+                    provider);
+    if (res != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "Failed to get c!\n");
+        goto out;
+    }
+    printf("subtraction result: %d\n", result);
+
+out:
+    umfMemoryProviderDestroy(provider);
+    return 0;
+}
diff --git a/examples/dram_and_fsdax/dram_and_fsdax.c b/examples/dram_and_fsdax/dram_and_fsdax.c
index ad2d12392a..4486934d98 100644
--- a/examples/dram_and_fsdax/dram_and_fsdax.c
+++ b/examples/dram_and_fsdax/dram_and_fsdax.c
@@ -96,7 +96,7 @@ int main(void) {
     // - the UMF_TESTS_FSDAX_PATH environment variable to contain
     //   a path to a file on this FSDAX device.
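One detail worth noting about custom_ctl.c above: its addition and subtraction handlers already copy the result into `arg` when `size == sizeof(int)`, so the separate `umfCtlGet` of `c` is optional. A hedged sketch of the one-call form (the `run_addition` helper is illustrative only, not part of this patch):

```c
#include <stdio.h>

#include <umf/experimental/ctl.h>
#include <umf/memory_provider.h>

// One-call variant: the example's addition handler writes p->c into `arg`
// when size == sizeof(int), so the follow-up umfCtlGet of "c" can be skipped.
static void run_addition(umf_memory_provider_handle_t provider) {
    int result = 0;
    umf_result_t res = umfCtlExec("umf.provider.by_handle.{}.addition",
                                  &result, sizeof(result), provider);
    if (res == UMF_RESULT_SUCCESS) {
        printf("addition result: %d\n", result);
    }
}
```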
char *path = getenv("UMF_TESTS_FSDAX_PATH"); - if (path == NULL || path[0] == 0) { + if (path == NULL || path[0] == '\0') { fprintf( stderr, "Warning: UMF_TESTS_FSDAX_PATH is not set, skipping testing ...\n"); diff --git a/include/umf/base.h b/include/umf/base.h index f7cd9de638..11d7b723b4 100644 --- a/include/umf/base.h +++ b/include/umf/base.h @@ -48,9 +48,38 @@ typedef enum umf_result_t { UMF_RESULT_ERROR_DEPENDENCY_UNAVAILABLE = 7, ///< External required dependency is unavailable or missing UMF_RESULT_ERROR_OUT_OF_RESOURCES = 8, ///< Out of internal resources - UMF_RESULT_ERROR_UNKNOWN = 0x7ffffffe ///< Unknown error + UMF_RESULT_ERROR_INVALID_CTL_PATH = + 9, ///< CTL path is not supported or not found + UMF_RESULT_ERROR_UNKNOWN = 0x7ffffffe ///< Unknown error } umf_result_t; +/// @brief Handle to the memory properties structure +typedef struct umf_memory_properties_t *umf_memory_properties_handle_t; + +/// @brief ID of the memory property +typedef enum umf_memory_property_id_t { + UMF_MEMORY_PROPERTY_INVALID = -1, ///< Invalid property + + // UMF specific + UMF_MEMORY_PROPERTY_PROVIDER_HANDLE = 0, ///< Handle to the memory provider + UMF_MEMORY_PROPERTY_POOL_HANDLE = 1, ///< Handle to the memory pool + + // generic pointer properties + UMF_MEMORY_PROPERTY_BASE_ADDRESS = 10, ///< Base address of the allocation + UMF_MEMORY_PROPERTY_BASE_SIZE = 11, ///< Base size of the allocation + UMF_MEMORY_PROPERTY_BUFFER_ID = 12, ///< Unique identifier for the buffer + + // GPU specific + UMF_MEMORY_PROPERTY_POINTER_TYPE = 20, ///< Type of the pointer + UMF_MEMORY_PROPERTY_CONTEXT = 21, ///< GPU context of the allocation + UMF_MEMORY_PROPERTY_DEVICE = + 22, ///< GPU device where the allocation resides + + /// @cond + UMF_MEMORY_PROPERTY_MAX_RESERVED = 0x1000, ///< Maximum reserved value + /// @endcond +} umf_memory_property_id_t; + /// @brief Type of the CTL query typedef enum umf_ctl_query_type { CTL_QUERY_READ, diff --git a/include/umf/experimental/ctl.h b/include/umf/experimental/ctl.h index 6af3532342..0f7ab860ae 100644 --- a/include/umf/experimental/ctl.h +++ b/include/umf/experimental/ctl.h @@ -17,34 +17,34 @@ extern "C" { #endif /// -/// @brief Get value of a specified attribute at the given name. -/// @param name name of an attribute to be retrieved +/// @brief Get value of a specified attribute at the given path. +/// @param path path of an attribute to be retrieved /// @param arg [out] pointer to the variable where the value will be stored /// @param size size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. /// -umf_result_t umfCtlGet(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlGet(const char *path, void *arg, size_t size, ...); /// -/// @brief Set value of a specified attribute at the given name. -/// @param name name of an attribute to be set +/// @brief Set value of a specified attribute at the given path. +/// @param path path of an attribute to be set /// @param arg [in] pointer to the value that will be set /// @param size [in] size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. 
/// -umf_result_t umfCtlSet(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlSet(const char *path, void *arg, size_t size, ...); /// /// @brief Execute callback related with the specified attribute. -/// @param name name of an attribute to be executed +/// @param path path of an attribute to be executed /// @param arg [in/out] pointer to the value, can be used as an input or output /// @param size [in] size of the value, depends on the context /// @param ... additional arguments that can be passed to the callback /// @return UMF_RESULT_SUCCESS on success or UMF_RESULT_ERROR_UNKNOWN on failure. /// -umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...); +umf_result_t umfCtlExec(const char *path, void *arg, size_t size, ...); #ifdef __cplusplus } diff --git a/include/umf/experimental/memory_properties.h b/include/umf/experimental/memory_properties.h new file mode 100644 index 0000000000..fe44ef592d --- /dev/null +++ b/include/umf/experimental/memory_properties.h @@ -0,0 +1,64 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#ifndef UMF_MEMORY_PROPERTIES_H +#define UMF_MEMORY_PROPERTIES_H 1 + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// @brief Get the memory properties handle for a given pointer +/// \details +/// The handle returned by this function is valid until the memory pointed +/// to by the pointer is freed. +/// @param ptr pointer to the allocated memory +/// @param props_handle [out] pointer to the memory properties handle +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure +umf_result_t +umfGetMemoryPropertiesHandle(const void *ptr, + umf_memory_properties_handle_t *props_handle); + +/// @brief Get the size of a specific memory property +/// \details +/// The size of the property should be used to allocate a buffer to hold the +/// value of the property. +/// @param props_handle handle to the memory properties +/// @param memory_property_id ID of the memory property to get the size of +/// @param size [out] pointer to the size of the property +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure +umf_result_t +umfGetMemoryPropertySize(umf_memory_properties_handle_t props_handle, + umf_memory_property_id_t memory_property_id, + size_t *size); + +/// @brief Get a specific memory property from the properties handle +/// \details +/// The type of the property value depends on the property ID. The size of +/// the property value buffer must be large enough to hold the +/// value of the property. The size of the property can be obtained by +/// calling umfGetMemoryPropertySize() with the same property ID. 
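Taken together with umfGetMemoryPropertiesHandle() and umfGetMemoryPropertySize() above, a caller-side query could look like the sketch below. It assumes UMF_MEMORY_PROPERTY_BASE_SIZE is reported as a size_t, which this header does not spell out, so treat it as illustrative rather than authoritative:

```c
#include <stddef.h>

#include <umf/base.h>
#include <umf/experimental/memory_properties.h>

// Return the base size of the allocation containing `ptr`, or 0 on failure.
static size_t allocation_base_size(const void *ptr) {
    umf_memory_properties_handle_t props = NULL;
    if (umfGetMemoryPropertiesHandle(ptr, &props) != UMF_RESULT_SUCCESS) {
        return 0;
    }

    size_t value_size = 0;
    if (umfGetMemoryPropertySize(props, UMF_MEMORY_PROPERTY_BASE_SIZE,
                                 &value_size) != UMF_RESULT_SUCCESS ||
        value_size != sizeof(size_t)) {
        return 0;
    }

    size_t base_size = 0;
    if (umfGetMemoryProperty(props, UMF_MEMORY_PROPERTY_BASE_SIZE, &base_size,
                             sizeof(base_size)) != UMF_RESULT_SUCCESS) {
        return 0;
    }
    return base_size;
}
```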
+/// @param props_handle handle to the memory properties +/// @param memory_property_id ID of the memory property to get +/// @param property_value [out] pointer to the value of the memory property +/// which will be filled +/// @param max_property_size size of the property value buffer +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure +umf_result_t umfGetMemoryProperty(umf_memory_properties_handle_t props_handle, + umf_memory_property_id_t memory_property_id, + void *property_value, + size_t max_property_size); + +#ifdef __cplusplus +} +#endif + +#endif /* UMF_MEMORY_PROPERTIES_H */ diff --git a/include/umf/memory_pool.h b/include/umf/memory_pool.h index c405e6f615..5662684fa7 100644 --- a/include/umf/memory_pool.h +++ b/include/umf/memory_pool.h @@ -43,7 +43,6 @@ typedef enum umf_pool_create_flag_t { /// @brief Type for combinations of pool creation flags typedef uint32_t umf_pool_create_flags_t; -/// /// @brief Creates new memory pool. /// @param ops instance of umf_memory_pool_ops_t /// @param provider memory provider that will be used for coarse-grain allocations. @@ -196,6 +195,24 @@ umf_result_t umfPoolSetTag(umf_memory_pool_handle_t hPool, void *tag, /// @return UMF_RESULT_SUCCESS on success. umf_result_t umfPoolGetTag(umf_memory_pool_handle_t hPool, void **tag); +/// +/// @brief Trims memory of the pool, removing resources that are not needed +/// to keep the pool operational. +/// \details +/// The minBytesToKeep parameter is a hint to the pool implementation +/// that it should try to keep at least this number of bytes of +/// memory in the pool. The pool implementation may also ignore this +/// parameter and try to trim the whole memory, in which case it +/// should return UMF_RESULT_SUCCESS. The pool implementation may +/// also return UMF_RESULT_ERROR_NOT_SUPPORTED if it does not support +/// trimming memory. +/// @param hPool pointer to the memory pool +/// @param minBytesToKeep minimum number of bytes to keep in the pool (if +/// possible - see details) +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfPoolTrimMemory(umf_memory_pool_handle_t hPool, + size_t minBytesToKeep); + #ifdef __cplusplus } #endif diff --git a/include/umf/memory_pool_ops.h b/include/umf/memory_pool_ops.h index 4cba053195..8a16e8fc69 100644 --- a/include/umf/memory_pool_ops.h +++ b/include/umf/memory_pool_ops.h @@ -22,7 +22,7 @@ extern "C" { /// @brief Version of the Memory Pool ops structure. /// NOTE: This is equal to the latest UMF version, in which the ops structure /// has been modified. -#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 0) +#define UMF_POOL_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 1) /// /// @brief This structure comprises function pointers used by corresponding umfPool* @@ -138,12 +138,17 @@ typedef struct umf_memory_pool_ops_t { /// /// * Implementations *must* return default pool name when NULL is provided, /// otherwise the pool's name is returned. - /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. /// + /// * The returned name should not exceed 64 characters including null character and may contain + /// only [a-zA-Z0-9_-] characters. Names violating these rules are deprecated + /// and will not be supported in the next major API release. + /// CTL functionality may be limited if other characters are returned. + /// + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
umf_result_t (*get_name)(void *pool, const char **name); /// - /// The following function is optional and memory pool implementation + /// The following functions are optional and memory pool implementation /// can keep it NULL. /// @@ -161,11 +166,35 @@ typedef struct umf_memory_pool_ops_t { /// @param args variable arguments for the operation. /// /// @return umf_result_t result of the control operation. + /// Implementations must return + /// UMF_RESULT_ERROR_INVALID_CTL_PATH if the given path is not + /// supported. /// - umf_result_t (*ext_ctl)(void *hPool, umf_ctl_query_source_t source, + umf_result_t (*ext_ctl)(void *pool, umf_ctl_query_source_t source, const char *name, void *arg, size_t size, umf_ctl_query_type_t queryType, va_list args); + // The following operations were added in ops version 1.1 + + /// + /// @brief Trims memory of the pool, removing resources that are not needed + /// to keep the pool operational. + /// \details + /// The minBytesToKeep parameter is a hint to the pool implementation + /// that it should try to keep at least this number of bytes of + /// memory in the pool. The pool implementation may also ignore this + /// parameter and try to trim the whole memory, in which case it + /// should return UMF_RESULT_SUCCESS. The pool implementation may + /// also return UMF_RESULT_ERROR_NOT_SUPPORTED if it does not support + /// trimming memory. + /// @param pool pointer to the memory pool + /// @param minBytesToKeep minimum number of bytes to keep in the pool (if + /// possible - see details) + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on + /// failure. + /// + umf_result_t (*ext_trim_memory)(void *pool, size_t minBytesToKeep); + } umf_memory_pool_ops_t; #ifdef __cplusplus diff --git a/include/umf/memory_provider_ops.h b/include/umf/memory_provider_ops.h index 16e1536fd9..80fb28860d 100644 --- a/include/umf/memory_provider_ops.h +++ b/include/umf/memory_provider_ops.h @@ -21,7 +21,7 @@ extern "C" { /// @brief Version of the Memory Provider ops structure. /// NOTE: This is equal to the latest UMF version, in which the ops structure /// has been modified. -#define UMF_PROVIDER_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 0) +#define UMF_PROVIDER_OPS_VERSION_CURRENT UMF_MAKE_VERSION(1, 1) /// /// @brief This structure comprises function pointers used by corresponding @@ -123,8 +123,18 @@ typedef struct umf_memory_provider_ops_t { /// @brief Retrieve name of a given memory \p provider. /// @param provider pointer to the memory provider /// @param name [out] pointer to a string containing the name of the \p provider - /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. + /// \details + /// * Implementations *must* return a literal null-terminated string. + /// + /// * Implementations *must* return default provider name when NULL is provided, + /// otherwise the pool's name is returned. /// + /// * The returned name should not exceed 64 characters and may contain + /// only [a-zA-Z0-9_-] characters. Names violating these rules are deprecated + /// and will not be supported in the next major API release. + /// CTL functionality may be limited if other characters are returned. + /// + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. umf_result_t (*get_name)(void *provider, const char **name); /// @@ -273,11 +283,49 @@ typedef struct umf_memory_provider_ops_t { /// @param args variable arguments for the operation. /// /// @return umf_result_t result of the control operation. 
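The new ext_trim_memory op above backs the public umfPoolTrimMemory() added to umf/memory_pool.h earlier in this patch. A typical call site could look like this sketch; the 1 MiB threshold is an arbitrary example value:

```c
#include <umf/memory_pool.h>

// Ask the pool to release memory it does not need, keeping roughly 1 MiB
// cached; pools without trim support report UMF_RESULT_ERROR_NOT_SUPPORTED.
static umf_result_t shrink_pool(umf_memory_pool_handle_t pool) {
    return umfPoolTrimMemory(pool, 1 << 20);
}
```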
+ /// Implementations must return + /// UMF_RESULT_ERROR_INVALID_CTL_PATH if the given path is not + /// supported. /// umf_result_t (*ext_ctl)(void *provider, umf_ctl_query_source_t source, const char *name, void *arg, size_t size, umf_ctl_query_type_t queryType, va_list args); + // The following operations were added in ops version 1.1 + + /// + /// @brief Retrieve provider-specific properties of the memory allocation. + /// \details + /// If provider supports allocation properties, + /// ext_get_allocation_properties and ext_get_allocation_properties_size, + /// must either be all set or all NULL. + /// @param provider pointer to the memory provider + /// @param ptr pointer to the allocated memory + /// @param memory_property_id ID of the memory property + /// @param property_value [out] pointer to the value of the memory property + /// which will be filled + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure + /// + umf_result_t (*ext_get_allocation_properties)( + void *provider, const void *ptr, + umf_memory_property_id_t memory_property_id, void *property_value); + + /// + /// @brief Retrieve size of the provider-specific properties of the memory + /// allocation. + /// \details + /// If provider supports allocation properties, + /// ext_get_allocation_properties and ext_get_allocation_properties_size, + /// must either be all set or all NULL. + /// @param provider pointer to the memory provider + /// @param memory_property_id ID of the memory property to get the size of + /// @param size [out] pointer to the size of the property + /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure + /// + umf_result_t (*ext_get_allocation_properties_size)( + void *provider, umf_memory_property_id_t memory_property_id, + size_t *size); + } umf_memory_provider_ops_t; #ifdef __cplusplus diff --git a/include/umf/pools/pool_disjoint.h b/include/umf/pools/pool_disjoint.h index c7032fd60b..1758fee894 100644 --- a/include/umf/pools/pool_disjoint.h +++ b/include/umf/pools/pool_disjoint.h @@ -109,7 +109,10 @@ umf_result_t umfDisjointPoolParamsSetSharedLimits( /// @brief Set custom name of the disjoint pool to be used in the traces. /// @param hParams handle to the parameters of the disjoint pool. -/// @param name custom name of the pool. Name longer than 64 characters will be truncated. +/// @param name custom name of the pool. Must not be NULL. Name longer than 63 +/// characters will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. umf_result_t umfDisjointPoolParamsSetName(umf_disjoint_pool_params_handle_t hParams, diff --git a/include/umf/pools/pool_jemalloc.h b/include/umf/pools/pool_jemalloc.h index 8d5b090d6c..f7c881b6cb 100644 --- a/include/umf/pools/pool_jemalloc.h +++ b/include/umf/pools/pool_jemalloc.h @@ -43,6 +43,17 @@ umf_result_t umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, size_t numArenas); +/// @brief Set custom name of the jemalloc pool used in traces. +/// @param hParams handle to the parameters of the jemalloc pool. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
+umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name); + const umf_memory_pool_ops_t *umfJemallocPoolOps(void); #ifdef __cplusplus diff --git a/include/umf/pools/pool_scalable.h b/include/umf/pools/pool_scalable.h index f93e8d38e9..749cd8a398 100644 --- a/include/umf/pools/pool_scalable.h +++ b/include/umf/pools/pool_scalable.h @@ -53,6 +53,17 @@ umf_result_t umfScalablePoolParamsSetKeepAllMemory(umf_scalable_pool_params_handle_t hParams, bool keepAllMemory); +/// @brief Set custom name of the scalable pool used in traces. +/// @param hParams handle to the parameters of the scalable pool. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t +umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, + const char *name); + /// @brief Return \p ops structure containing pointers to the scalable pool implementation. /// @return pointer to the \p umf_memory_pool_ops_t struct. const umf_memory_pool_ops_t *umfScalablePoolOps(void); diff --git a/include/umf/providers/provider_cuda.h b/include/umf/providers/provider_cuda.h index bbbabc2dea..dbed42219d 100644 --- a/include/umf/providers/provider_cuda.h +++ b/include/umf/providers/provider_cuda.h @@ -61,6 +61,16 @@ umf_result_t umfCUDAMemoryProviderParamsSetMemoryType( umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( umf_cuda_memory_provider_params_handle_t hParams, unsigned int flags); +/// @brief Set custom name of the CUDA Memory Provider. +/// @param hParams handle to the parameters of the CUDA Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name); + const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void); #ifdef __cplusplus diff --git a/include/umf/providers/provider_devdax_memory.h b/include/umf/providers/provider_devdax_memory.h index f8557f9a31..d57b1c5de2 100644 --- a/include/umf/providers/provider_devdax_memory.h +++ b/include/umf/providers/provider_devdax_memory.h @@ -56,6 +56,16 @@ umf_result_t umfDevDaxMemoryProviderParamsSetDeviceDax( umf_result_t umfDevDaxMemoryProviderParamsSetProtection( umf_devdax_memory_provider_params_handle_t hParams, unsigned protection); +/// @brief Set custom name of the Devdax Memory Provider. +/// @param hParams [in] handle to the parameters of the Devdax Memory Provider. +/// @param name [in] custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
+umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name); + /// @brief Devdax Memory Provider operation results typedef enum umf_devdax_memory_provider_native_error { UMF_DEVDAX_RESULT_SUCCESS = UMF_DEVDAX_RESULTS_START_FROM, ///< Success diff --git a/include/umf/providers/provider_file_memory.h b/include/umf/providers/provider_file_memory.h index 5d0c6eb169..5586cfe7c4 100644 --- a/include/umf/providers/provider_file_memory.h +++ b/include/umf/providers/provider_file_memory.h @@ -68,6 +68,16 @@ typedef enum umf_file_memory_provider_native_error { const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void); +/// @brief Set custom name of the File Memory Provider. +/// @param hParams handle to the parameters of the File Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name); + #ifdef __cplusplus } #endif diff --git a/include/umf/providers/provider_fixed_memory.h b/include/umf/providers/provider_fixed_memory.h index 7c4507a274..fcedd5c005 100644 --- a/include/umf/providers/provider_fixed_memory.h +++ b/include/umf/providers/provider_fixed_memory.h @@ -51,6 +51,16 @@ umf_result_t umfFixedMemoryProviderParamsDestroy( /// @return Pointer to the umf_memory_provider_ops_t structure. const umf_memory_provider_ops_t *umfFixedMemoryProviderOps(void); +/// @brief Set custom name of the Fixed Memory Provider. +/// @param hParams [in] handle to the parameters of the Fixed Memory Provider. +/// @param name [in] custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t umfFixedMemoryProviderParamsSetName( + umf_fixed_memory_provider_params_handle_t hParams, const char *name); + /// @brief Fixed Memory Provider operation results typedef enum umf_fixed_memory_provider_native_error { UMF_FIXED_RESULT_SUCCESS = UMF_FIXED_RESULTS_START_FROM, ///< Success diff --git a/include/umf/providers/provider_level_zero.h b/include/umf/providers/provider_level_zero.h index 657e19ee3d..22c3513351 100644 --- a/include/umf/providers/provider_level_zero.h +++ b/include/umf/providers/provider_level_zero.h @@ -8,6 +8,8 @@ #ifndef UMF_LEVEL_ZERO_PROVIDER_H #define UMF_LEVEL_ZERO_PROVIDER_H +#include + #include #ifdef __cplusplus @@ -91,6 +93,27 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( umf_level_zero_memory_provider_params_handle_t hParams, uint32_t deviceOrdinal); +/// @brief Set custom name of the Level Zero Memory Provider. +/// @param hParams handle to the parameters of the Level Zero Memory Provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. 
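All of these *ParamsSetName setters follow the same pattern. For instance, naming the OS memory provider (umfOsMemoryProviderParamsSetName is added to provider_os_memory.h later in this patch) might look like the sketch below, where "my_os_provider" is just an illustrative name:

```c
#include <stddef.h>

#include <umf/memory_provider.h>
#include <umf/providers/provider_os_memory.h>

// Create an OS memory provider with a CTL/trace-friendly custom name
// (only [a-zA-Z0-9_-] characters, at most 63 of them).
static umf_memory_provider_handle_t create_named_os_provider(void) {
    umf_os_memory_provider_params_handle_t params = NULL;
    umf_memory_provider_handle_t provider = NULL;

    if (umfOsMemoryProviderParamsCreate(&params) != UMF_RESULT_SUCCESS) {
        return NULL;
    }
    umfOsMemoryProviderParamsSetName(params, "my_os_provider");
    umfMemoryProviderCreate(umfOsMemoryProviderOps(), params, &provider);
    umfOsMemoryProviderParamsDestroy(params);
    return provider;
}
```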
+umf_result_t umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name); + +/// @brief Adds or removes devices on which allocations should be made +/// resident. +/// @param provider handle to the memory provider +/// @param device device handle +/// @param is_adding Boolean indicating if peer is to be removed or added +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on +/// failure. +umf_result_t umfLevelZeroMemoryProviderResidentDeviceChange( + umf_memory_provider_handle_t provider, ze_device_handle_t device, + bool is_adding); + const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void); #ifdef __cplusplus diff --git a/include/umf/providers/provider_os_memory.h b/include/umf/providers/provider_os_memory.h index 2629596094..93a20173cf 100644 --- a/include/umf/providers/provider_os_memory.h +++ b/include/umf/providers/provider_os_memory.h @@ -134,6 +134,17 @@ umf_result_t umfOsMemoryProviderParamsSetPartitions( umf_os_memory_provider_params_handle_t hParams, umf_numa_split_partition_t *partitions, unsigned partitions_len); +/// @brief Set custom name of the OS memory provider. +/// @param hParams handle to the parameters of the OS memory provider. +/// @param name custom name. Must not be NULL. Name longer than 63 characters +/// will be truncated. +/// \details Name should contain only [a-zA-Z0-9_-] characters. +/// Other names are deprecated and may limit CTL functionality. +/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure. +umf_result_t +umfOsMemoryProviderParamsSetName(umf_os_memory_provider_params_handle_t hParams, + const char *name); + /// @brief OS Memory Provider operation results typedef enum umf_os_memory_provider_native_error { UMF_OS_RESULT_SUCCESS = UMF_OS_RESULTS_START_FROM, ///< Success diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d11e04c4f5..45046fdc6d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -34,19 +34,20 @@ add_subdirectory(utils) add_subdirectory(base_alloc) add_subdirectory(coarse) -set(UMF_LIBS umf_utils umf_ba umf_coarse) - -set(HWLOC_DEPENDENT_SOURCES topology.c) +set(UMF_LIBS umf_utils umf_ba umf_coarse $) set(UMF_SOURCES ctl/ctl.c + ctl/ctl_defaults.c libumf.c ipc.c ipc_cache.c memory_pool.c + memory_properties.c memory_provider.c memory_provider_get_last_failed.c memtarget.c + memtargets/memtarget_numa.c mempolicy.c memspace.c memspaces/memspace_host_all.c @@ -66,7 +67,8 @@ set(UMF_SOURCES pool/pool_disjoint.c pool/pool_jemalloc.c pool/pool_proxy.c - pool/pool_scalable.c) + pool/pool_scalable.c + topology.c) if(UMF_POOL_JEMALLOC_ENABLED) set(UMF_LIBS ${UMF_LIBS} ${JEMALLOC_LIBRARIES}) @@ -78,16 +80,8 @@ if(UMF_POOL_JEMALLOC_ENABLED) "UMF_POOL_JEMALLOC_ENABLED=1") endif() -if(NOT UMF_DISABLE_HWLOC) - set(UMF_SOURCES ${UMF_SOURCES} ${HWLOC_DEPENDENT_SOURCES} - memtargets/memtarget_numa.c) - set(UMF_LIBS ${UMF_LIBS} $) - set(UMF_PRIVATE_LIBRARY_DIRS ${UMF_PRIVATE_LIBRARY_DIRS} - ${LIBHWLOC_LIBRARY_DIRS}) -else() - set(UMF_COMMON_COMPILE_DEFINITIONS ${UMF_COMMON_COMPILE_DEFINITIONS} - "UMF_NO_HWLOC=1") -endif() +set(UMF_PRIVATE_LIBRARY_DIRS ${UMF_PRIVATE_LIBRARY_DIRS} + ${LIBHWLOC_LIBRARY_DIRS}) set(UMF_SOURCES_LINUX libumf_linux.c) set(UMF_SOURCES_MACOSX libumf_linux.c) diff --git a/src/base_alloc/base_alloc.c b/src/base_alloc/base_alloc.c index 60126c9f0e..9f254e6758 100644 --- a/src/base_alloc/base_alloc.c +++ b/src/base_alloc/base_alloc.c @@ -128,7 +128,7 @@ static void ba_divide_memory_into_chunks(umf_ba_pool_t 
*pool, void *ptr, current_chunk->next = NULL; pool->metadata.free_list = ptr; // address of the first chunk - // mark the memory as unaccessible again + // mark the memory as inaccessible again utils_annotate_memory_inaccessible(ptr, size); } diff --git a/src/base_alloc/base_alloc_global.c b/src/base_alloc/base_alloc_global.c index abed0879f8..c7c4e22758 100644 --- a/src/base_alloc/base_alloc_global.c +++ b/src/base_alloc/base_alloc_global.c @@ -265,3 +265,19 @@ size_t umf_ba_global_malloc_usable_size(const void *ptr) { return usable_size; } + +char *umf_ba_global_strdup(const char *s) { + if (!s) { + return NULL; + } + + size_t len = strlen(s); + + char *ptr = umf_ba_global_alloc(len + 1); + if (!ptr) { + return NULL; + } + + memcpy(ptr, s, len + 1); + return ptr; +} diff --git a/src/base_alloc/base_alloc_global.h b/src/base_alloc/base_alloc_global.h index 4cec997255..71216ec6cf 100644 --- a/src/base_alloc/base_alloc_global.h +++ b/src/base_alloc/base_alloc_global.h @@ -22,6 +22,7 @@ void umf_ba_destroy_global(void); bool umf_ba_global_is_destroyed(void); size_t umf_ba_global_malloc_usable_size(const void *ptr); void *umf_ba_global_aligned_alloc(size_t size, size_t alignment); +char *umf_ba_global_strdup(const char *s); #ifdef __cplusplus } diff --git a/src/critnib/critnib.c b/src/critnib/critnib.c index 17a7d80be1..8ff5796684 100644 --- a/src/critnib/critnib.c +++ b/src/critnib/critnib.c @@ -1094,7 +1094,8 @@ int critnib_find(struct critnib *c, uintptr_t key, enum find_dir_t dir, * * If func() returns non-zero, the search is aborted. */ -static int iter(struct critnib_node *__restrict n, word min, word max, +static int iter(struct critnib_node *__restrict n, const word min, + const word max, int (*func)(word key, void *value, void *privdata), void *privdata) { if (is_leaf(n)) { @@ -1129,9 +1130,21 @@ static int iter(struct critnib_node *__restrict n, word min, word max, void critnib_iter(critnib *c, uintptr_t min, uintptr_t max, int (*func)(uintptr_t key, void *value, void *privdata), void *privdata) { + bool wasIterating = false; utils_mutex_lock(&c->mutex); if (c->root) { iter(c->root, min, max, func, privdata); + wasIterating = true; } utils_mutex_unlock(&c->mutex); + if (!wasIterating) { + LOG_DEBUG("there was no root, iterating critnib: %p was skipped", + (void *)c); + } +} + +void critnib_iter_all(critnib *c, + int (*func)(uintptr_t key, void *value, void *privdata), + void *privdata) { + critnib_iter(c, 0, (uintptr_t)-1, func, privdata); } diff --git a/src/critnib/critnib.h b/src/critnib/critnib.h index 690d75faef..c9f215d079 100644 --- a/src/critnib/critnib.h +++ b/src/critnib/critnib.h @@ -35,6 +35,10 @@ int critnib_insert(critnib *c, uintptr_t key, void *value, int update); void critnib_iter(critnib *c, uintptr_t min, uintptr_t max, int (*func)(uintptr_t key, void *value, void *privdata), void *privdata); +void critnib_iter_all(critnib *c, + int (*func)(uintptr_t key, void *value, void *privdata), + void *privdata); + int critnib_remove_release(critnib *c, uintptr_t key); /* diff --git a/src/ctl/ctl.c b/src/ctl/ctl.c index b1e9de9d81..2edca576a0 100644 --- a/src/ctl/ctl.c +++ b/src/ctl/ctl.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -27,14 +28,12 @@ #include "base_alloc/base_alloc_global.h" #include "ctl_internal.h" +#include "uthash/utlist.h" #include "utils/utils_common.h" #include "utils_log.h" -#include "utlist.h" #ifdef _WIN32 #define strtok_r strtok_s -#else -#include #endif #define MAX_CONFIG_FILE_LEN (1 << 20) /* 1 megabyte */ @@ -49,13 
+48,25 @@ static int ctl_global_first_free = 0; static umf_ctl_node_t CTL_NODE(global)[CTL_MAX_ENTRIES]; +static void *(*ctl_malloc_fn)(size_t) = NULL; +static void (*ctl_free_fn)(void *) = NULL; + +void ctl_init(void *(*Malloc)(size_t), void (*Free)(void *)) { + if (Malloc) { + ctl_malloc_fn = Malloc; + } + if (Free) { + ctl_free_fn = Free; + } +} + typedef struct optional_umf_result_t { bool is_valid; umf_result_t value; } optional_umf_result_t; void *Zalloc(size_t sz) { - void *ptr = umf_ba_global_alloc(sz); + void *ptr = ctl_malloc_fn(sz); if (ptr) { memset(ptr, 0, sz); } @@ -64,7 +75,7 @@ void *Zalloc(size_t sz) { char *Strdup(const char *s) { size_t len = strlen(s) + 1; - char *p = umf_ba_global_alloc(len); + char *p = ctl_malloc_fn(len); if (p) { memcpy(p, s, len); } @@ -84,7 +95,7 @@ char *Strdup(const char *s) { } \ case CTL_ARG_TYPE_STRING: { \ char *str = va_arg(va, char *); \ - memcpy(output, str, ctl_argument->dest_size); \ + snprintf((char *)output, ctl_argument->dest_size, "%s", str); \ break; \ } \ case CTL_ARG_TYPE_INT: { \ @@ -97,9 +108,14 @@ char *Strdup(const char *s) { *(long long *)output = ll; \ break; \ } \ + case CTL_ARG_TYPE_UNSIGNED_LONG_LONG: { \ + unsigned long long ll = va_arg(va, unsigned long long); \ + *(unsigned long long *)output = ll; \ + break; \ + } \ case CTL_ARG_TYPE_PTR: { \ - void *p = va_arg(va, void *); \ - *(uintptr_t *)output = (uintptr_t)p; \ + void *ptr = va_arg(va, void *); \ + *(uintptr_t *)output = (uintptr_t)ptr; \ break; \ } \ default: \ @@ -121,9 +137,9 @@ static void ctl_delete_indexes(umf_ctl_index_utlist_t *indexes) { LL_DELETE(indexes, elem); if (elem) { if (elem->arg) { - umf_ba_global_free(elem->arg); + ctl_free_fn(elem->arg); } - umf_ba_global_free(elem); + ctl_free_fn(elem); } } } @@ -139,7 +155,7 @@ static void ctl_query_cleanup_real_args(const umf_ctl_node_t *n, void *real_arg, switch (source) { case CTL_QUERY_CONFIG_INPUT: - umf_ba_global_free(real_arg); + ctl_free_fn(real_arg); break; case CTL_QUERY_PROGRAMMATIC: break; @@ -153,7 +169,7 @@ static void ctl_query_cleanup_real_args(const umf_ctl_node_t *n, void *real_arg, * structure */ static void *ctl_parse_args(const struct ctl_argument *arg_proto, char *arg) { - char *dest_arg = umf_ba_global_alloc(arg_proto->dest_size); + char *dest_arg = ctl_malloc_fn(arg_proto->dest_size); if (dest_arg == NULL) { return NULL; } @@ -162,9 +178,6 @@ static void *ctl_parse_args(const struct ctl_argument *arg_proto, char *arg) { char *arg_sep = strtok_r(arg, CTL_VALUE_ARG_SEPARATOR, &sptr); for (const struct ctl_argument_parser *p = arg_proto->parsers; p->parser != NULL; ++p) { - if (arg_sep == NULL) { - goto error_parsing; - } if (p->parser(arg_sep, dest_arg + p->dest_offset, p->dest_size) != 0) { goto error_parsing; @@ -176,7 +189,7 @@ static void *ctl_parse_args(const struct ctl_argument *arg_proto, char *arg) { return dest_arg; error_parsing: - umf_ba_global_free(dest_arg); + ctl_free_fn(dest_arg); return NULL; } @@ -185,11 +198,13 @@ static void *ctl_parse_args(const struct ctl_argument *arg_proto, char *arg) { * structure as required by the node callback */ static void *ctl_query_get_real_args(const umf_ctl_node_t *n, void *write_arg, - umf_ctl_query_source_t source) { + umf_ctl_query_source_t source, + size_t *size) { void *real_arg = NULL; switch (source) { case CTL_QUERY_CONFIG_INPUT: real_arg = ctl_parse_args(n->arg, write_arg); + *size = n->arg->dest_size; break; case CTL_QUERY_PROGRAMMATIC: real_arg = write_arg; @@ -238,7 +253,7 @@ static umf_result_t ctl_exec_query_write(void 
*ctx, const umf_ctl_node_t *n, return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - void *real_arg = ctl_query_get_real_args(n, arg, source); + void *real_arg = ctl_query_get_real_args(n, arg, source, &size); if (real_arg == NULL) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -283,26 +298,26 @@ ctl_exec_query_subtree(void *ctx, const umf_ctl_node_t *n, * ctl_find_and_execulte_node -- (internal) searches for a matching entry point in the * provided nodes * - * Name offset is used to return the offset of the name in the query string. + * Path offset is used to return the offset of the path in the query string. * The caller is responsible for freeing all of the allocated indexes, * regardless of the return value. */ static optional_umf_result_t ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args) { assert(nodes != NULL); - assert(name != NULL); + assert(path != NULL); const umf_ctl_node_t *n = NULL; optional_umf_result_t ret; - size_t name_offset = 0; + size_t path_offset = 0; ret.is_valid = true; ret.value = UMF_RESULT_SUCCESS; char *sptr = NULL; - char *parse_str = Strdup(name); + char *parse_str = Strdup(path); if (parse_str == NULL) { ret.is_valid = false; return ret; @@ -321,7 +336,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, */ while (node_name != NULL) { char *next_node = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr); - name_offset = node_name - parse_str; + path_offset = node_name - parse_str; if (n != NULL && n->type == CTL_NODE_SUBTREE) { // if a subtree occurs, the subtree handler should be called break; @@ -364,31 +379,73 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, // if the node has an argument, but no next node, then it is an error goto error; } - void *node_arg; - if (strcmp(next_node, CTL_WILDCARD) == 0) { - if (source == CTL_QUERY_CONFIG_INPUT) { - LOG_ERR( - "ctl {} wildcard is not supported for config input"); - goto error; - } - // argument is a wildcard so we need to allocate it from va_list - node_arg = umf_ba_global_alloc(n->arg->dest_size); - if (node_arg == NULL) { - goto error; - } - pop_va_list(args, n->arg, node_arg); - } else { - node_arg = ctl_parse_args(n->arg, next_node); - if (node_arg == NULL) { - goto error; + char *node_arg = ctl_malloc_fn(n->arg->dest_size); + if (node_arg == NULL) { + goto error; + } + + // Parse this argument. It might contain "struct" which is series of fields separated by comma. + // each field contains separate parser in the parsers array. 
+ for (const struct ctl_argument_parser *p = n->arg->parsers; + p->dest_size != 0; ++p) { + + if (next_node && strcmp(next_node, CTL_WILDCARD) == 0) { + if (source == CTL_QUERY_CONFIG_INPUT) { + ctl_free_fn(node_arg); + LOG_ERR("ctl {} wildcard is not supported for config " + "input"); + goto error; + } + + if (p->type == CTL_ARG_TYPE_UNKNOWN) { + ctl_free_fn(node_arg); + LOG_ERR("ctl {} wildcard is not supported for node: %s", + node_name); + goto error; + } + char *output = node_arg + p->dest_offset; + pop_va_list(args, p, output); + } else { + if (!p->parser) { + LOG_ERR( + "this node can be passed only as {} wildcard: %s", + next_node); + ctl_free_fn(node_arg); + goto error; + } + int r = p->parser(next_node, node_arg + p->dest_offset, + p->dest_size); + if (r < 0) { + // Parsing failed — cleanup and propagate error + ctl_free_fn(node_arg); + goto error; + } else if (r > 0) { + // Parser did not consume next_node, which means this argument is optional + // and not present. Optional arguments are always at the end of the expected + // sequence, so we can safely stop parsing here. + // + // Example: + // Given two paths: + // "umf.pool.by_name.name.stats.allocs" + // "umf.pool.by_name.name.1.stats.allocs" + // The parser for 'by_name' expects the next node is string followed by optional + // integer index, if its sees "stats" instead of integer, like in second example + // it will return >0 to signal that the optional + // integer argument is not present. + // This allows the remaining nodes ("stats.allocs") to be parsed normally + // without treating "stats" as part of 'by_name'. + break; + } } + // we parsed next_node as an argument so we next one + next_node = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr); } umf_ctl_index_utlist_t *entry = NULL; - entry = umf_ba_global_alloc(sizeof(*entry)); + entry = ctl_malloc_fn(sizeof(*entry)); if (entry == NULL) { - umf_ba_global_free(arg); + ctl_free_fn(node_arg); goto error; } @@ -397,8 +454,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, entry->arg_size = n->arg->dest_size; LL_APPEND(indexes, entry); - // we parsed next_node as an argument so we next one - next_node = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr); + if (next_node == NULL) { // last node was a node with arg, but there is no next mode. 
// check if there is nameless leaf on next level @@ -436,7 +492,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, // if the appropriate node (leaf or subtree) is not found, then return error if (n == NULL || (n->type != CTL_NODE_LEAF && n->type != CTL_NODE_SUBTREE)) { - ret.value = UMF_RESULT_ERROR_INVALID_ARGUMENT; + ret.value = UMF_RESULT_ERROR_INVALID_CTL_PATH; goto out; } @@ -444,7 +500,7 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, // if the node is a subtree, then we need to call the subtree handler ret.value = ctl_exec_query_subtree(ctx, n, source, arg, size, indexes->next, - name + name_offset, type, args); + path + path_offset, type, args); } else { switch (type) { case CTL_QUERY_READ: @@ -462,26 +518,26 @@ ctl_find_and_execute_node(const umf_ctl_node_t *nodes, void *ctx, } } out: - umf_ba_global_free(parse_str); + ctl_free_fn(parse_str); ctl_delete_indexes(indexes); return ret; error: ctl_delete_indexes(indexes); - umf_ba_global_free(parse_str); + ctl_free_fn(parse_str); ret.is_valid = false; return ret; } /* - * ctl_query -- (internal) parses the name and calls the appropriate methods + * ctl_query -- (internal) parses the path and calls the appropriate methods * from the ctl tree */ umf_result_t ctl_query(struct ctl *ctl, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args) { - if (name == NULL) { + if (path == NULL) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -489,16 +545,16 @@ umf_result_t ctl_query(struct ctl *ctl, void *ctx, va_copy(args_copy, args); optional_umf_result_t ret = ctl_find_and_execute_node( - CTL_NODE(global), ctx, source, name, type, arg, size, args_copy); + CTL_NODE(global), ctx, source, path, type, arg, size, args_copy); if (ret.is_valid == false && ctl) { - ret = ctl_find_and_execute_node(ctl->root, ctx, source, name, type, arg, + ret = ctl_find_and_execute_node(ctl->root, ctx, source, path, type, arg, size, args); } va_end(args_copy); - return ret.is_valid ? ret.value : UMF_RESULT_ERROR_INVALID_ARGUMENT; + return ret.is_valid ? ret.value : UMF_RESULT_ERROR_INVALID_CTL_PATH; } /* @@ -517,16 +573,16 @@ void ctl_register_module_node(struct ctl *c, const char *name, /* * ctl_parse_query -- (internal) splits an entire query string - * into name and value + * into path and value */ -static int ctl_parse_query(char *qbuf, char **name, char **value) { +static int ctl_parse_query(char *qbuf, char **path, char **value) { if (qbuf == NULL) { return -1; } char *sptr = NULL; - *name = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &sptr); - if (*name == NULL) { + *path = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &sptr); + if (*path == NULL) { return -1; } @@ -552,21 +608,21 @@ static umf_result_t ctl_load_config_helper(struct ctl *ctl, void *ctx, char *buf, ...) { umf_result_t ret = UMF_RESULT_SUCCESS; char *sptr = NULL; /* for internal use of strtok */ - char *name; + char *path; char *value; char *qbuf = strtok_r(buf, CTL_STRING_QUERY_SEPARATOR, &sptr); va_list empty_args; va_start(empty_args, buf); while (qbuf != NULL) { - int parse_res = ctl_parse_query(qbuf, &name, &value); + int parse_res = ctl_parse_query(qbuf, &path, &value); if (parse_res != 0) { ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto end; } // we do not need to copy va_list before call as we know that for query_config_input // ctl_query will not call va_arg on it. 
Ref 7.15/3 of C99 standard - ret = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT, name, CTL_QUERY_WRITE, - value, 0, empty_args); + ret = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT, path, CTL_QUERY_WRITE, + value, strlen(value) + 1, empty_args); if (ret != UMF_RESULT_SUCCESS && ctx != NULL) { goto end; @@ -599,7 +655,7 @@ umf_result_t ctl_load_config_from_string(struct ctl *ctl, void *ctx, umf_result_t ret = ctl_load_config(ctl, ctx, buf); - umf_ba_global_free(buf); + ctl_free_fn(buf); return ret; } @@ -609,7 +665,6 @@ umf_result_t ctl_load_config_from_string(struct ctl *ctl, void *ctx, * This function opens up the config file, allocates a buffer of size equal to * the size of the file, reads its content and sanitizes it for ctl_load_config. */ -#ifndef _WIN32 // TODO: implement for Windows umf_result_t ctl_load_config_from_file(struct ctl *ctl, void *ctx, const char *cfg_file) { umf_result_t ret = UMF_RESULT_ERROR_UNKNOWN; @@ -661,13 +716,28 @@ umf_result_t ctl_load_config_from_file(struct ctl *ctl, void *ctx, ret = ctl_load_config(ctl, ctx, buf); - umf_ba_global_free(buf); + ctl_free_fn(buf); error_file_parse: (void)fclose(fp); return ret; } -#endif + +/* + * ctl_parse_ull -- (internal) parses and returns an unsigned long long + */ +static unsigned long long ctl_parse_ull(const char *str) { + char *endptr; + int olderrno = errno; + errno = 0; + unsigned long long val = strtoull(str, &endptr, 0); + if (endptr == str || errno != 0) { + return ULLONG_MAX; + } + errno = olderrno; + + return val; +} /* * ctl_parse_ll -- (internal) parses and returns a long long signed integer @@ -692,6 +762,9 @@ static long long ctl_parse_ll(const char *str) { int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size) { /* suppress unused-parameter errors */ (void)dest_size; + if (!arg) { + return -1; + } int *intp = dest; char in = ((const char *)arg)[0]; @@ -707,10 +780,49 @@ int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size) { return -1; } +/* + * ctl_arg_unsigned -- parses unsigned integer argument + */ +int ctl_arg_unsigned(const void *arg, void *dest, size_t dest_size) { + if (!arg) { + return -1; + } + + unsigned long long val = ctl_parse_ull(arg); + if (val == ULLONG_MAX) { + return -1; + } + + switch (dest_size) { + case sizeof(unsigned int): + if (val > UINT_MAX) { + return -1; + } + *(unsigned int *)dest = (unsigned int)val; + break; + case sizeof(unsigned long long): + *(unsigned long long *)dest = val; + break; + case sizeof(uint8_t): + if (val > UINT8_MAX) { + return -1; + } + *(uint8_t *)dest = (uint8_t)val; + break; + default: + return -1; + } + + return 0; +} + /* * ctl_arg_integer -- parses signed integer argument */ int ctl_arg_integer(const void *arg, void *dest, size_t dest_size) { + if (!arg) { + return -1; + } long long val = ctl_parse_ll(arg); if (val == LLONG_MIN) { return -1; @@ -726,12 +838,6 @@ int ctl_arg_integer(const void *arg, void *dest, size_t dest_size) { case sizeof(long long): *(long long *)dest = val; break; - case sizeof(uint8_t): - if (val > UINT8_MAX || val < 0) { - return -1; - } - *(uint8_t *)dest = (uint8_t)val; - break; default: return -1; } @@ -744,6 +850,10 @@ int ctl_arg_integer(const void *arg, void *dest, size_t dest_size) { * buffer */ int ctl_arg_string(const void *arg, void *dest, size_t dest_size) { + if (!arg) { + return -1; + } + /* check if the incoming string is longer or equal to dest_size */ if (strnlen(arg, dest_size) == dest_size) { return -1; diff --git a/src/ctl/ctl_defaults.c b/src/ctl/ctl_defaults.c new file mode 100644 
index 0000000000..57799d30d2 --- /dev/null +++ b/src/ctl/ctl_defaults.c @@ -0,0 +1,146 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include + +#include "base_alloc_global.h" +#include "ctl_defaults.h" +#include "utils_concurrency.h" +#include "utils_log.h" +#include "utlist.h" + +static umf_result_t default_ctl_helper(ctl_ext_ctl_fn fn, void *ctl, + umf_ctl_query_source_t source, + const char *name, void *arg, size_t size, + ...) { + va_list empty_args; + va_start(empty_args, size); + umf_result_t ret = + fn(ctl, source, name, arg, size, CTL_QUERY_WRITE, empty_args); + va_end(empty_args); + return ret; +} + +umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx, + umf_ctl_query_source_t source, void *arg, + size_t size, const char *extra_name, + umf_ctl_query_type_t queryType) { + (void)source; + if (strstr(extra_name, "{}") != NULL) { + LOG_ERR("%s, default setting does not support wildcard parameters {}", + extra_name); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + + utils_mutex_lock(mtx); + + ctl_default_entry_t *entry = NULL; + LL_FOREACH(*list, entry) { + if (strcmp(entry->name, extra_name) == 0) { + break; + } + } + + if (queryType == CTL_QUERY_WRITE) { + bool is_new_entry = false; + if (!entry) { + entry = umf_ba_global_alloc(sizeof(*entry)); + if (!entry) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + entry->name = NULL; + entry->value = NULL; + entry->next = NULL; + is_new_entry = true; + } + + char *new_name = umf_ba_global_strdup(extra_name); + if (!new_name) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + if (entry->name) { + umf_ba_global_free(entry->name); + } + entry->name = new_name; + + void *new_value = NULL; + if (size > 0) { + new_value = umf_ba_global_alloc(size); + if (!new_value) { + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(new_value, arg, size); + } + if (entry->value) { + umf_ba_global_free(entry->value); + } + entry->value = new_value; + entry->value_size = size; + entry->source = source; + + if (is_new_entry) { + LL_APPEND(*list, entry); + } + } else if (queryType == CTL_QUERY_READ) { + if (!entry) { + LOG_WARN("Wrong path name: %s", extra_name); + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } + + if (entry->value_size > size) { + LOG_ERR("Provided buffer size %zu is smaller than field size %zu", + size, entry->value_size); + utils_mutex_unlock(mtx); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + memcpy(arg, entry->value, entry->value_size); + } + + utils_mutex_unlock(mtx); + return UMF_RESULT_SUCCESS; +} + +void ctl_default_apply(ctl_default_entry_t *list, const char *pname, + ctl_ext_ctl_fn ext_ctl, void *priv) { + if (!pname || !ext_ctl) { + return; + } + + size_t pname_len = strlen(pname); + ctl_default_entry_t *it = NULL; + LL_FOREACH(list, it) { + if (strlen(it->name) > pname_len + 1 && + strncmp(it->name, pname, pname_len) == 0 && + it->name[pname_len] == '.') { + const char *ctl_name = it->name + pname_len + 1; + default_ctl_helper(ext_ctl, priv, it->source, ctl_name, it->value, + it->value_size); + } + } +} + +void ctl_default_destroy(ctl_default_entry_t **list, utils_mutex_t *mtx) { + utils_mutex_lock(mtx); + ctl_default_entry_t *entry = NULL, *tmp = NULL; + LL_FOREACH_SAFE(*list, entry, tmp) { + LL_DELETE(*list, entry); + if 
(entry->name) { + umf_ba_global_free(entry->name); + } + if (entry->value) { + umf_ba_global_free(entry->value); + } + umf_ba_global_free(entry); + } + utils_mutex_unlock(mtx); +} diff --git a/src/ctl/ctl_defaults.h b/src/ctl/ctl_defaults.h new file mode 100644 index 0000000000..1da73692ee --- /dev/null +++ b/src/ctl/ctl_defaults.h @@ -0,0 +1,51 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef UMF_CTL_DEFAULTS_H +#define UMF_CTL_DEFAULTS_H 1 + +#include +#include + +#include + +#include "ctl_internal.h" +#include "utils_concurrency.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ctl_default_entry_t { + char *name; + void *value; + size_t value_size; + umf_ctl_query_source_t source; + struct ctl_default_entry_t *next; +} ctl_default_entry_t; + +umf_result_t ctl_default_subtree(ctl_default_entry_t **list, utils_mutex_t *mtx, + umf_ctl_query_source_t source, void *arg, + size_t size, const char *extra_name, + umf_ctl_query_type_t queryType); + +typedef umf_result_t (*ctl_ext_ctl_fn)(void *obj, umf_ctl_query_source_t source, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, + va_list args); + +void ctl_default_apply(ctl_default_entry_t *list, const char *pname, + ctl_ext_ctl_fn ext_ctl, void *priv); + +void ctl_default_destroy(ctl_default_entry_t **list, utils_mutex_t *mtx); + +#ifdef __cplusplus +} +#endif + +#endif /* UMF_CTL_DEFAULTS_H */ diff --git a/src/ctl/ctl_internal.h b/src/ctl/ctl_internal.h index a45fa7732e..fe6ea2884a 100644 --- a/src/ctl/ctl_internal.h +++ b/src/ctl/ctl_internal.h @@ -56,37 +56,39 @@ enum ctl_node_type { typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size); -struct ctl_argument_parser { - size_t dest_offset; /* offset of the field inside of the argument */ - size_t dest_size; /* size of the field inside of the argument */ - ctl_arg_parser parser; -}; typedef enum ctl_arg_type { CTL_ARG_TYPE_UNKNOWN = 0, CTL_ARG_TYPE_BOOLEAN, CTL_ARG_TYPE_STRING, CTL_ARG_TYPE_INT, CTL_ARG_TYPE_LONG_LONG, + CTL_ARG_TYPE_UNSIGNED_LONG_LONG, CTL_ARG_TYPE_PTR, MAX_CTL_ARG_TYPE } ctl_arg_type_t; +struct ctl_argument_parser { + size_t dest_offset; /* offset of the field inside of the argument */ + size_t dest_size; /* size of the field inside of the argument */ + ctl_arg_type_t type; /* type of the argument */ + ctl_arg_parser parser; +}; + struct ctl_argument { - size_t dest_size; /* size of the entire argument */ - ctl_arg_type_t type; /* type of the argument */ - struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */ + size_t dest_size; /* size of the entire argument */ + struct ctl_argument_parser parsers[8]; /* array of 'fields' in arg */ }; -#define sizeof_member(t, m) sizeof(((t *)0)->m) +#define sizeof_member(type, member) sizeof(((type *)0)->member) -#define CTL_ARG_PARSER(t, p) \ - { 0, sizeof(t), p } +#define CTL_ARG_PARSER(type, vaarg_type, parser) \ + { 0, sizeof(type), vaarg_type, parser } -#define CTL_ARG_PARSER_STRUCT(t, m, p) \ - { offsetof(t, m), sizeof_member(t, m), p } +#define CTL_ARG_PARSER_STRUCT(type, member, vaarg_type, parser) \ + { offsetof(type, member), sizeof_member(type, member), vaarg_type, parser } #define CTL_ARG_PARSER_END \ - { 0, 0, NULL } + { 0, 0, 0, NULL } /* * CTL Tree node structure, do not use directly. 
All the necessary functionality @@ -121,7 +123,7 @@ struct ctl { int first_free; }; -void initialize_global_ctl(void); +void ctl_init(void *(*Malloc)(size_t), void (*Free)(void *)); umf_result_t ctl_load_config_from_string(struct ctl *ctl, void *ctx, const char *cfg_string); @@ -133,39 +135,55 @@ void ctl_register_module_node(struct ctl *c, const char *name, struct ctl_node *n); int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size); +int ctl_arg_integer(const void *arg, void *dest, size_t dest_size); +int ctl_arg_unsigned(const void *arg, void *dest, size_t dest_size); +int ctl_arg_string(const void *arg, void *dest, size_t dest_size); + #define CTL_ARG_BOOLEAN \ { \ - sizeof(int), CTL_ARG_TYPE_BOOLEAN, { \ - {0, sizeof(int), ctl_arg_boolean}, CTL_ARG_PARSER_END \ + sizeof(int), { \ + {0, sizeof(int), CTL_ARG_TYPE_BOOLEAN, ctl_arg_boolean}, \ + CTL_ARG_PARSER_END \ } \ } -int ctl_arg_integer(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_INT \ { \ - sizeof(int), CTL_ARG_TYPE_INT, { \ - {0, sizeof(int), ctl_arg_integer}, CTL_ARG_PARSER_END \ + sizeof(int), { \ + {0, sizeof(int), CTL_ARG_TYPE_INT, ctl_arg_integer}, \ + CTL_ARG_PARSER_END \ } \ } #define CTL_ARG_LONG_LONG \ { \ - sizeof(long long), CTL_ARG_TYPE_LONG_LONG, { \ - {0, sizeof(long long), ctl_arg_integer}, CTL_ARG_PARSER_END \ + sizeof(long long), { \ + {0, sizeof(long long), CTL_ARG_TYPE_LONG_LONG, ctl_arg_integer}, \ + CTL_ARG_PARSER_END \ + } \ + } + +#define CTL_ARG_UNSIGNED_LONG_LONG \ + { \ + sizeof(unsigned long long), { \ + {0, sizeof(unsigned long long), CTL_ARG_TYPE_UNSIGNED_LONG_LONG, \ + ctl_arg_unsigned}, \ + CTL_ARG_PARSER_END \ } \ } -int ctl_arg_string(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_STRING(len) \ { \ - len, CTL_ARG_TYPE_PTR, { \ - {0, len, ctl_arg_string}, CTL_ARG_PARSER_END \ + len, { \ + {0, len, CTL_ARG_TYPE_STRING, ctl_arg_string}, CTL_ARG_PARSER_END \ } \ } #define CTL_ARG_PTR \ { \ - sizeof(void *), CTL_ARG_TYPE_PTR, { {0, 0, NULL}, CTL_ARG_PARSER_END } \ + sizeof(void *), { \ + {0, sizeof(void *), CTL_ARG_TYPE_PTR, NULL}, CTL_ARG_PARSER_END \ + } \ } #define _CTL_STR(name) #name @@ -180,7 +198,7 @@ int ctl_arg_string(const void *arg, void *dest, size_t dest_size); #define CTL_NODE(name, ...) 
ctl_node_##__VA_ARGS__##_##name umf_result_t ctl_query(struct ctl *ctl, void *ctx, - umf_ctl_query_source_t source, const char *name, + umf_ctl_query_source_t source, const char *path, umf_ctl_query_type_t type, void *arg, size_t size, va_list args); diff --git a/src/ipc.c b/src/ipc.c index d4e5cc8066..29ed5bac20 100644 --- a/src/ipc.c +++ b/src/ipc.c @@ -58,14 +58,19 @@ umf_result_t umfGetIPCHandle(const void *ptr, umf_ipc_handle_t *umfIPCHandle, } size_t ipcHandleSize = 0; - umf_alloc_info_t allocInfo; - umf_result_t ret = umfMemoryTrackerGetAllocInfo(ptr, &allocInfo); + umf_memory_properties_handle_t props = NULL; + umf_result_t ret = umfGetMemoryPropertiesHandle(ptr, &props); if (ret != UMF_RESULT_SUCCESS) { - LOG_ERR("cannot get alloc info for ptr = %p.", ptr); + LOG_ERR("cannot get alloc props for ptr = %p.", ptr); return ret; } - ret = umfPoolGetIPCHandleSize(allocInfo.pool, &ipcHandleSize); + if (props == NULL || props->pool == NULL) { + LOG_ERR("cannot get pool from alloc info for ptr = %p.", ptr); + return UMF_RESULT_ERROR_UNKNOWN; + } + + ret = umfPoolGetIPCHandleSize(props->pool, &ipcHandleSize); if (ret != UMF_RESULT_SUCCESS) { LOG_ERR("cannot get IPC handle size."); return ret; @@ -79,11 +84,14 @@ umf_result_t umfGetIPCHandle(const void *ptr, umf_ipc_handle_t *umfIPCHandle, // We cannot use umfPoolGetMemoryProvider function because it returns // upstream provider but we need tracking one - umf_memory_provider_handle_t provider = allocInfo.pool->provider; - assert(provider); + if (props->pool->provider == NULL) { + LOG_ERR("cannot get memory provider from pool"); + umf_ba_global_free(ipcData); + return UMF_RESULT_ERROR_UNKNOWN; + } + umf_memory_provider_handle_t provider = props->pool->provider; - ret = umfMemoryProviderGetIPCHandle(provider, allocInfo.base, - allocInfo.baseSize, + ret = umfMemoryProviderGetIPCHandle(provider, props->base, props->base_size, (void *)ipcData->providerIpcData); if (ret != UMF_RESULT_SUCCESS) { LOG_ERR("failed to get IPC handle."); @@ -92,10 +100,10 @@ umf_result_t umfGetIPCHandle(const void *ptr, umf_ipc_handle_t *umfIPCHandle, } // ipcData->handle_id is filled by tracking provider - ipcData->base = allocInfo.base; + ipcData->base = props->base; ipcData->pid = utils_getpid(); - ipcData->baseSize = allocInfo.baseSize; - ipcData->offset = (uintptr_t)ptr - (uintptr_t)allocInfo.base; + ipcData->baseSize = props->base_size; + ipcData->offset = (uintptr_t)ptr - (uintptr_t)props->base; *umfIPCHandle = ipcData; *size = ipcHandleSize; diff --git a/src/libumf.c b/src/libumf.c index 4ccde9bb4f..ab6be8704a 100644 --- a/src/libumf.c +++ b/src/libumf.c @@ -12,7 +12,9 @@ #include #include "base_alloc_global.h" +#include "ctl/ctl_internal.h" #include "ipc_cache.h" +#include "libumf.h" #include "memory_pool_internal.h" #include "memory_provider_internal.h" #include "memspace_internal.h" @@ -20,12 +22,10 @@ #include "provider_cuda_internal.h" #include "provider_level_zero_internal.h" #include "provider_tracking.h" +#include "topology.h" #include "utils_common.h" #include "utils_concurrency.h" #include "utils_log.h" -#if !defined(UMF_NO_HWLOC) -#include "topology.h" -#endif umf_memory_tracker_handle_t TRACKER = NULL; @@ -36,9 +36,25 @@ static UTIL_ONCE_FLAG initMutexOnce = UTIL_ONCE_FLAG_INIT; static void initialize_init_mutex(void) { utils_mutex_init(&initMutex); } static umf_ctl_node_t CTL_NODE(umf)[] = {CTL_CHILD(provider), CTL_CHILD(pool), - CTL_NODE_END}; + CTL_CHILD(logger), CTL_NODE_END}; -void initialize_global_ctl(void) { CTL_REGISTER_MODULE(NULL, umf); } 
+void initialize_ctl(void) { + ctl_init(umf_ba_global_alloc, umf_ba_global_free); + + CTL_REGISTER_MODULE(NULL, umf); + const char *env_var = getenv("UMF_CONF"); + if (env_var && env_var[0] != '\0') { + LOG_INFO("Loading UMF configuration from environment variable: %s", + env_var); + ctl_load_config_from_string(NULL, NULL, env_var); + } + + const char *file_var = getenv("UMF_CONF_FILE"); + if (file_var && file_var[0] != '\0') { + LOG_INFO("Loading UMF configuration from file: %s", file_var); + ctl_load_config_from_file(NULL, NULL, file_var); + } +} // Benchmarks may fork multiple times and topology init is slow. // Init topology before fork (if not already) so children don't repeat it. @@ -54,6 +70,8 @@ umf_result_t umfInit(void) { if (umfRefCount == 0) { utils_log_init(); + initialize_ctl(); + umf_result_t umf_result = umfMemoryTrackerCreate(&TRACKER); if (umf_result != UMF_RESULT_SUCCESS) { LOG_ERR("Failed to create memory tracker"); @@ -72,7 +90,6 @@ umf_result_t umfInit(void) { } LOG_DEBUG("UMF IPC cache initialized"); - initialize_global_ctl(); } umfRefCount++; @@ -99,7 +116,7 @@ umf_result_t umfTearDown(void) { } if (--umfRefCount == 0) { -#if !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#if !defined(_WIN32) umfMemspaceHostAllDestroy(); umfMemspaceHighestCapacityDestroy(); umfMemspaceHighestBandwidthDestroy(); @@ -122,6 +139,9 @@ umf_result_t umfTearDown(void) { umfMemoryTrackerDestroy(t); LOG_DEBUG("UMF tracker destroyed"); + umfProviderCtlDefaultsDestroy(); + umfPoolCtlDefaultsDestroy(); + umf_ba_destroy_global(); LOG_DEBUG("UMF base allocator destroyed"); @@ -137,38 +157,41 @@ umf_result_t umfTearDown(void) { int umfGetCurrentVersion(void) { return UMF_VERSION_CURRENT; } -umf_result_t umfCtlGet(const char *name, void *arg, size_t size, ...) { +umf_result_t umfCtlGet(const char *path, void *arg, size_t size, ...) { + libumfInit(); // ctx can be NULL when getting defaults - if (name == NULL || arg == NULL || size == 0) { + if (path == NULL || arg == NULL || size == 0) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_READ, arg, size, args); va_end(args); return ret; } -umf_result_t umfCtlSet(const char *name, void *arg, size_t size, ...) { +umf_result_t umfCtlSet(const char *path, void *arg, size_t size, ...) { + libumfInit(); // ctx can be NULL when setting defaults - if (name == NULL || arg == NULL || size == 0) { + if (path == NULL || arg == NULL || size == 0) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_WRITE, arg, size, args); va_end(args); return ret; } -umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...) { +umf_result_t umfCtlExec(const char *path, void *arg, size_t size, ...) { + libumfInit(); // arg can be NULL when executing a command // ctx can be NULL when executing defaults // size can depends on the arg - if (name == NULL) { + if (path == NULL) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -178,7 +201,7 @@ umf_result_t umfCtlExec(const char *name, void *arg, size_t size, ...) 
{ va_list args; va_start(args, size); - umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, name, + umf_result_t ret = ctl_query(NULL, NULL, CTL_QUERY_PROGRAMMATIC, path, CTL_QUERY_RUNNABLE, arg, size, args); va_end(args); return ret; diff --git a/src/libumf.def b/src/libumf.def index 0159ddbe2b..c4093292e5 100644 --- a/src/libumf.def +++ b/src/libumf.def @@ -6,7 +6,7 @@ LIBRARY UMF -VERSION 1.0 +VERSION 1.1 EXPORTS DllMain @@ -144,3 +144,17 @@ EXPORTS umfJemallocPoolParamsDestroy umfJemallocPoolParamsSetNumArenas umfPoolGetName +; Added in UMF_1.1 + umfCUDAMemoryProviderParamsSetName + umfDevDaxMemoryProviderParamsSetName + umfFileMemoryProviderParamsSetName + umfFixedMemoryProviderParamsSetName + umfGetMemoryPropertiesHandle + umfGetMemoryProperty + umfGetMemoryPropertySize + umfJemallocPoolParamsSetName + umfLevelZeroMemoryProviderParamsSetName + umfLevelZeroMemoryProviderResidentDeviceChange + umfOsMemoryProviderParamsSetName + umfPoolTrimMemory + umfScalablePoolParamsSetName diff --git a/src/libumf.map b/src/libumf.map index 348675ff0f..d23c6a2e02 100644 --- a/src/libumf.map +++ b/src/libumf.map @@ -141,3 +141,19 @@ UMF_1.0 { local: *; }; + +UMF_1.1 { + umfCUDAMemoryProviderParamsSetName; + umfDevDaxMemoryProviderParamsSetName; + umfFileMemoryProviderParamsSetName; + umfFixedMemoryProviderParamsSetName; + umfGetMemoryPropertiesHandle; + umfGetMemoryProperty; + umfGetMemoryPropertySize; + umfJemallocPoolParamsSetName; + umfLevelZeroMemoryProviderParamsSetName; + umfLevelZeroMemoryProviderResidentDeviceChange; + umfOsMemoryProviderParamsSetName; + umfPoolTrimMemory; + umfScalablePoolParamsSetName; +} UMF_1.0; diff --git a/src/memory_pool.c b/src/memory_pool.c index 004f42d9ed..2161d5d45c 100644 --- a/src/memory_pool.c +++ b/src/memory_pool.c @@ -8,6 +8,7 @@ */ #include +#include #include #include @@ -16,6 +17,7 @@ #include #include "base_alloc_global.h" +#include "ctl/ctl_defaults.h" #include "ctl/ctl_internal.h" #include "libumf.h" #include "memory_pool_internal.h" @@ -24,19 +26,132 @@ #include "utils_assert.h" #include "utils_concurrency.h" #include "utils_log.h" +#include "utils_name.h" +#include "utlist.h" + +// Handle UTHash memory allocation failures without aborting the process. 
+#define HASH_NONFATAL_OOM 1 +static bool uthash_oom = false; +#define uthash_nonfatal_oom(obj) \ + do { \ + (void)(obj); \ + uthash_oom = true; \ + } while (0) + +#include "uthash.h" + +static ctl_default_entry_t *pool_default_list = NULL; +static utils_mutex_t pool_default_mtx; +static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT; -#define UMF_DEFAULT_SIZE 100 -#define UMF_DEFAULT_LEN 100 +static struct ctl umf_pool_ctl_root; -utils_mutex_t ctl_mtx; -static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT; +static void pool_ctl_init(void); -char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0}; -char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0}; +typedef struct pool_name_list_entry_t { + umf_memory_pool_handle_t pool; + struct pool_name_list_entry_t *next; +} pool_name_list_entry_t; -static struct ctl umf_pool_ctl_root; +typedef struct pool_name_dict_entry_t { + char *name; /* key */ + pool_name_list_entry_t *pools; + UT_hash_handle hh; +} pool_name_dict_entry_t; + +static pool_name_dict_entry_t *pools_by_name = NULL; +static utils_rwlock_t pools_by_name_lock; +static UTIL_ONCE_FLAG pools_by_name_init_once = UTIL_ONCE_FLAG_INIT; + +static void pools_by_name_init(void) { utils_rwlock_init(&pools_by_name_lock); } + +static umf_result_t pools_by_name_add(umf_memory_pool_handle_t pool) { + const char *name = NULL; + umf_result_t ret = pool->ops.get_name(pool->pool_priv, &name); + if (ret != UMF_RESULT_SUCCESS || !name) { + return ret; + } -static void ctl_init(void); + if (!utils_name_is_valid(name)) { + LOG_ERR("Pool name: %s contains invalid character, ctl by_name is not " + "supported for this pool", + name); + return UMF_RESULT_SUCCESS; + } + + utils_init_once(&pools_by_name_init_once, pools_by_name_init); + utils_write_lock(&pools_by_name_lock); + + pool_name_dict_entry_t *entry = NULL; + HASH_FIND_STR(pools_by_name, name, entry); + if (!entry) { + entry = umf_ba_global_alloc(sizeof(*entry)); + if (!entry) { + utils_write_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + entry->name = umf_ba_global_strdup(name); + if (!entry->name) { + umf_ba_global_free(entry); + utils_write_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + entry->pools = NULL; + uthash_oom = false; + HASH_ADD_KEYPTR(hh, pools_by_name, entry->name, strlen(entry->name), + entry); + if (uthash_oom) { + umf_ba_global_free(entry->name); + umf_ba_global_free(entry); + utils_write_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + } + + pool_name_list_entry_t *node = umf_ba_global_alloc(sizeof(*node)); + if (!node) { + utils_write_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + node->pool = pool; + node->next = NULL; + LL_APPEND(entry->pools, node); + + utils_write_unlock(&pools_by_name_lock); + return UMF_RESULT_SUCCESS; +} + +static void pools_by_name_remove(umf_memory_pool_handle_t pool) { + const char *name = NULL; + if (pool->ops.get_name(pool->pool_priv, &name) != UMF_RESULT_SUCCESS || + !name) { + return; + } + + utils_init_once(&pools_by_name_init_once, pools_by_name_init); + utils_write_lock(&pools_by_name_lock); + + pool_name_dict_entry_t *entry = NULL; + HASH_FIND_STR(pools_by_name, name, entry); + if (entry) { + pool_name_list_entry_t *it = NULL, *tmp = NULL; + LL_FOREACH_SAFE(entry->pools, it, tmp) { + if (it->pool == pool) { + LL_DELETE(entry->pools, it); + umf_ba_global_free(it); + break; + } + } + if (entry->pools == NULL) { + 
HASH_DEL(pools_by_name, entry); + umf_ba_global_free(entry->name); + umf_ba_global_free(entry); + } + } + + utils_write_unlock(&pools_by_name_lock); +} static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -52,7 +167,7 @@ static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( queryType, arg, size, args2); va_end(args2); - if (ret == UMF_RESULT_ERROR_INVALID_ARGUMENT) { + if (ret == UMF_RESULT_ERROR_INVALID_CTL_PATH) { // Node was not found in pool_ctl_root, try to query the specific pool ret = hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size, queryType, args); @@ -65,55 +180,12 @@ static umf_result_t CTL_SUBTREE_HANDLER(default)( void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, umf_ctl_index_utlist_t *indexes, const char *extra_name, umf_ctl_query_type_t queryType, va_list args) { - (void)indexes, (void)source, (void)ctx, (void)args; - utils_init_once(&mem_pool_ctl_initialized, ctl_init); - - if (strstr(extra_name, "{}") != NULL) { - // We might implement it in future - it requires store copy of va_list - // in defaults entries array, which according to C standard is possible, - // but quite insane. - LOG_ERR("%s, default setting do not support wildcard parameters {}", - extra_name); - return UMF_RESULT_ERROR_NOT_SUPPORTED; - } - - utils_mutex_lock(&ctl_mtx); - - if (queryType == CTL_QUERY_WRITE) { - int i = 0; - for (; i < UMF_DEFAULT_SIZE; i++) { - if (CTL_DEFAULT_ENTRIES[i][0] == '\0' || - strcmp(CTL_DEFAULT_ENTRIES[i], extra_name) == 0) { - strncpy(CTL_DEFAULT_ENTRIES[i], extra_name, UMF_DEFAULT_LEN); - CTL_DEFAULT_ENTRIES[i][UMF_DEFAULT_LEN - 1] = '\0'; - strncpy(CTL_DEFAULT_VALUES[i], arg, UMF_DEFAULT_LEN); - CTL_DEFAULT_VALUES[i][UMF_DEFAULT_LEN - 1] = '\0'; - break; - } - } - if (UMF_DEFAULT_SIZE == i) { - LOG_ERR("Default entries array is full"); - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_OUT_OF_RESOURCES; - } - } else if (queryType == CTL_QUERY_READ) { - int i = 0; - for (; i < UMF_DEFAULT_SIZE; i++) { - if (strcmp(CTL_DEFAULT_ENTRIES[i], extra_name) == 0) { - strncpy(arg, CTL_DEFAULT_VALUES[i], size); - break; - } - } - if (UMF_DEFAULT_SIZE == i) { - LOG_WARN("Wrong path name: %s", extra_name); - utils_mutex_unlock(&ctl_mtx); - return UMF_RESULT_ERROR_INVALID_ARGUMENT; - } - } - - utils_mutex_unlock(&ctl_mtx); - - return UMF_RESULT_SUCCESS; + (void)indexes; + (void)ctx; + (void)args; + utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init); + return ctl_default_subtree(&pool_default_list, &pool_default_mtx, source, + arg, size, extra_name, queryType); } static umf_result_t @@ -145,11 +217,138 @@ static umf_ctl_node_t CTL_NODE(by_handle)[] = { static const struct ctl_argument CTL_ARG(by_handle) = CTL_ARG_PTR; +typedef struct by_name_arg_t { + char name[255]; + size_t index; +} by_name_arg_t; + +// parses optional size_t argument. 
if arg is not integer then sets out to size_max +static int by_name_index_parser(const void *arg, void *dest, size_t dest_size) { + size_t *out = (size_t *)dest; + + if (arg == NULL) { + *out = SIZE_MAX; + return 1; // node n + } + + int ret = ctl_arg_unsigned(arg, dest, dest_size); + if (ret) { + *out = SIZE_MAX; + return 1; + } + + return 0; +} + +static const struct ctl_argument CTL_ARG(by_name) = { + sizeof(by_name_arg_t), + {CTL_ARG_PARSER_STRUCT(by_name_arg_t, name, CTL_ARG_TYPE_STRING, + ctl_arg_string), + CTL_ARG_PARSER_STRUCT(by_name_arg_t, index, + CTL_ARG_TYPE_UNSIGNED_LONG_LONG, + by_name_index_parser), + CTL_ARG_PARSER_END}}; + +static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_name)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes, const char *extra_name, + umf_ctl_query_type_t queryType, va_list args) { + (void)ctx; + + utils_init_once(&pools_by_name_init_once, pools_by_name_init); + + by_name_arg_t *name_arg = (by_name_arg_t *)indexes->arg; + + utils_read_lock(&pools_by_name_lock); + pool_name_dict_entry_t *entry = NULL; + // find pool name in the hashmap + HASH_FIND_STR(pools_by_name, name_arg->name, entry); + if (!entry) { + utils_read_unlock(&pools_by_name_lock); + LOG_ERR("Pool %s not found", name_arg->name); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + size_t count = 0; + pool_name_list_entry_t *it = NULL; + LL_COUNT(entry->pools, it, count); + // Special case: if user asked for umf.pool.by_name.name.count, we just return + // number of pools sharing the same name. + if (strcmp(extra_name, "count") == 0) { + if (name_arg->index != SIZE_MAX) { + LOG_ERR("count field requires no index argument"); + utils_read_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (queryType != CTL_QUERY_READ) { + LOG_ERR("count field is read only"); + utils_read_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t *output = (size_t *)arg; + *output = count; + utils_read_unlock(&pools_by_name_lock); + return UMF_RESULT_SUCCESS; + } + + if (queryType == CTL_QUERY_READ && count > 1 && + name_arg->index == SIZE_MAX) { + LOG_ERR("CTL 'by_name' read operation requires exactly one pool with " + "the specified name. " + "Actual number of pools with name '%s' is %zu. " + "You can add extra index parameter after the name to specify " + "exact pool e.g. umf.pool.by_name.pool_name,1.node_name", + name_arg->name, count); + utils_read_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + umf_result_t ret = UMF_RESULT_SUCCESS; + size_t nr = 0; + + if (name_arg->index != SIZE_MAX && name_arg->index >= count) { + LOG_ERR( + "Invalid index %zu. Actual number of pools with name '%s' is %zu. 
", + name_arg->index, name_arg->name, count); + utils_read_unlock(&pools_by_name_lock); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + LL_FOREACH(entry->pools, it) { + if (name_arg->index != SIZE_MAX && nr++ != name_arg->index) { + continue; + } + va_list args2; + va_copy(args2, args); + umf_result_t r = ctl_query(&umf_pool_ctl_root, it->pool, source, + extra_name, queryType, arg, size, args2); + va_end(args2); + + if (r == UMF_RESULT_ERROR_INVALID_CTL_PATH) { + va_copy(args2, args); + r = it->pool->ops.ext_ctl(it->pool->pool_priv, source, extra_name, + arg, size, queryType, args2); + va_end(args2); + } + if (r != UMF_RESULT_SUCCESS && ret == UMF_RESULT_SUCCESS) { + ret = r; + } + } + utils_read_unlock(&pools_by_name_lock); + + return ret; +} + +static umf_ctl_node_t CTL_NODE(by_name)[] = { + CTL_LEAF_SUBTREE(CTL_NONAME, by_name), + CTL_NODE_END, +}; + umf_ctl_node_t CTL_NODE(pool)[] = {CTL_CHILD_WITH_ARG(by_handle), + CTL_CHILD_WITH_ARG(by_name), CTL_LEAF_SUBTREE(default), CTL_NODE_END}; -static void ctl_init(void) { - utils_mutex_init(&ctl_mtx); +static void pool_ctl_init(void) { + utils_mutex_init(&pool_default_mtx); CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats); } @@ -164,34 +363,42 @@ umfDefaultCtlPoolHandle(void *hPool, umf_ctl_query_source_t operationType, (void)size; (void)queryType; (void)args; + // if given path is not supported implementation should return UMF_RESULT_ERROR_INVALID_CTL_PATH + return UMF_RESULT_ERROR_INVALID_CTL_PATH; +} + +static umf_result_t umfDefaultTrimMemory(void *provider, + size_t minBytesToKeep) { + (void)provider; + (void)minBytesToKeep; return UMF_RESULT_ERROR_NOT_SUPPORTED; } +static umf_result_t umfPoolPostInitialize(const umf_memory_pool_ops_t *ops, + void *pool_priv, ...) { + va_list args; + va_start(args, pool_priv); + umf_result_t ret = + ops->ext_ctl(pool_priv, CTL_QUERY_PROGRAMMATIC, "post_initialize", NULL, + 0, CTL_QUERY_RUNNABLE, args); + va_end(args); + + return ret; +} + // logical sum (OR) of all umf_pool_create_flags_t flags static const umf_pool_create_flags_t UMF_POOL_CREATE_FLAG_ALL = UMF_POOL_CREATE_FLAG_OWN_PROVIDER | UMF_POOL_CREATE_FLAG_DISABLE_TRACKING; // windows do not allow to use uninitialized va_list so this function help us to initialize it. -static umf_result_t default_ctl_helper(const umf_memory_pool_ops_t *ops, - void *ctl, const char *name, void *arg, - ...) 
{ - va_list empty_args; - va_start(empty_args, arg); - umf_result_t ret = - ops->ext_ctl(ctl, CTL_QUERY_PROGRAMMATIC, name, arg, UMF_DEFAULT_LEN, - CTL_QUERY_WRITE, empty_args); - va_end(empty_args); - return ret; -} - static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, umf_memory_provider_handle_t provider, const void *params, umf_pool_create_flags_t flags, umf_memory_pool_handle_t *hPool) { - if (!ops || !provider || !hPool) { - return UMF_RESULT_ERROR_INVALID_ARGUMENT; - } + UMF_CHECK((ops != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((provider != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); // validate flags if (flags & ~UMF_POOL_CREATE_FLAG_ALL) { @@ -200,11 +407,24 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } umf_result_t ret = UMF_RESULT_SUCCESS; - + umf_memory_pool_ops_t compatible_ops; if (ops->version != UMF_POOL_OPS_VERSION_CURRENT) { LOG_WARN("Memory Pool ops version \"%d\" is different than the current " "version \"%d\"", ops->version, UMF_POOL_OPS_VERSION_CURRENT); + + // Create a new ops compatible structure with the current version + memset(&compatible_ops, 0, sizeof(compatible_ops)); + if (ops->version < UMF_MAKE_VERSION(1, 1)) { + LOG_INFO("Detected 1.0 version or below of Memory Pool ops, " + "upgrading to current version"); + memcpy(&compatible_ops, ops, + offsetof(umf_memory_pool_ops_t, ext_trim_memory)); + } else { + LOG_ERR("Unsupported Memory Pool ops version: %d", ops->version); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + ops = &compatible_ops; } umf_memory_pool_handle_t pool = @@ -223,7 +443,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, pool->provider = provider; } - utils_init_once(&mem_pool_ctl_initialized, ctl_init); + utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init); pool->flags = flags; pool->ops = *ops; @@ -234,34 +454,40 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, pool->ops.ext_ctl = umfDefaultCtlPoolHandle; } + if (NULL == pool->ops.ext_trim_memory) { + pool->ops.ext_trim_memory = umfDefaultTrimMemory; + } + if (NULL == utils_mutex_init(&pool->lock)) { LOG_ERR("Failed to initialize mutex for pool"); ret = UMF_RESULT_ERROR_UNKNOWN; goto err_lock_init; } - ret = ops->initialize(pool->provider, params, &pool->pool_priv); + ret = pool->ops.initialize(pool->provider, params, &pool->pool_priv); if (ret != UMF_RESULT_SUCCESS) { goto err_pool_init; } - // Set default property "name" to pool if exists - for (int i = 0; i < UMF_DEFAULT_SIZE; i++) { - const char *pname = NULL; - ret = ops->get_name(NULL, &pname); - if (ret != UMF_RESULT_SUCCESS) { - LOG_ERR("Failed to get pool name"); - goto err_pool_init; - } - if (CTL_DEFAULT_ENTRIES[i][0] != '\0' && pname && - strstr(CTL_DEFAULT_ENTRIES[i], pname)) { + const char *pname = NULL; + ret = ops->get_name(pool->pool_priv, &pname); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("Failed to get pool name"); + goto err_pool_init; + } + assert(pname != NULL); + utils_warn_invalid_name("Memory pool", pname); + ctl_default_apply(pool_default_list, pname, ops->ext_ctl, pool->pool_priv); - default_ctl_helper(ops, pool->pool_priv, CTL_DEFAULT_ENTRIES[i], - CTL_DEFAULT_VALUES[i]); - } + ret = umfPoolPostInitialize(&pool->ops, pool->pool_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_INVALID_CTL_PATH) { + LOG_ERR("Failed to post-initialize pool"); + goto err_pool_init; } *hPool = pool; + 
pools_by_name_add(pool); + LOG_INFO("Memory pool created: %p", (void *)pool); return UMF_RESULT_SUCCESS; @@ -277,10 +503,14 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops, } umf_result_t umfPoolDestroy(umf_memory_pool_handle_t hPool) { + UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + if (umf_ba_global_is_destroyed()) { return UMF_RESULT_ERROR_UNKNOWN; } + pools_by_name_remove(hPool); + umf_result_t ret = hPool->ops.finalize(hPool->pool_priv); umf_memory_provider_handle_t hUpstreamProvider = NULL; @@ -323,9 +553,18 @@ umf_result_t umfFree(void *ptr) { } umf_result_t umfPoolByPtr(const void *ptr, umf_memory_pool_handle_t *pool) { - UMF_CHECK((pool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); - *pool = umfMemoryTrackerGetPool(ptr); - return *pool ? UMF_RESULT_SUCCESS : UMF_RESULT_ERROR_INVALID_ARGUMENT; + UMF_CHECK(pool != NULL, UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK(ptr != NULL, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + umf_memory_properties_handle_t props = NULL; + umf_result_t ret = umfGetMemoryPropertiesHandle(ptr, &props); + if (ret != UMF_RESULT_SUCCESS || props == NULL || props->pool == NULL) { + *pool = NULL; + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + *pool = props->pool; + return UMF_RESULT_SUCCESS; } umf_result_t umfPoolGetMemoryProvider(umf_memory_pool_handle_t hPool, @@ -454,3 +693,15 @@ umf_result_t umfPoolGetTag(umf_memory_pool_handle_t hPool, void **tag) { utils_mutex_unlock(&hPool->lock); return UMF_RESULT_SUCCESS; } + +umf_result_t umfPoolTrimMemory(umf_memory_pool_handle_t hPool, + size_t minBytesToKeep) { + UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + + return hPool->ops.ext_trim_memory(hPool->pool_priv, minBytesToKeep); +} + +void umfPoolCtlDefaultsDestroy(void) { + utils_init_once(&mem_pool_ctl_initialized, pool_ctl_init); + ctl_default_destroy(&pool_default_list, &pool_default_mtx); +} diff --git a/src/memory_pool_internal.h b/src/memory_pool_internal.h index 00f9a2a05b..8c66cd4e78 100644 --- a/src/memory_pool_internal.h +++ b/src/memory_pool_internal.h @@ -47,6 +47,8 @@ typedef struct umf_memory_pool_t { extern umf_ctl_node_t CTL_NODE(pool)[]; +void umfPoolCtlDefaultsDestroy(void); + #ifdef __cplusplus } #endif diff --git a/src/memory_properties.c b/src/memory_properties.c new file mode 100644 index 0000000000..11d4e5593e --- /dev/null +++ b/src/memory_properties.c @@ -0,0 +1,158 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include + +#include +#include +#include + +#include "memory_properties_internal.h" +#include "memory_provider_internal.h" +#include "provider/provider_tracking.h" + +umf_result_t +umfGetMemoryPropertiesHandle(const void *ptr, + umf_memory_properties_handle_t *props_handle) { + UMF_CHECK((props_handle != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + + tracker_alloc_info_t *info = NULL; + umf_result_t ret = umfMemoryTrackerGetAllocInfo(ptr, &info); + if (ret == UMF_RESULT_SUCCESS) { + *props_handle = &info->props; + return UMF_RESULT_SUCCESS; + } + + // try to get IPC info + umf_ipc_info_t ipc_info; + ret = umfMemoryTrackerGetIpcInfo(ptr, &ipc_info); + if (ret == UMF_RESULT_SUCCESS) { + *props_handle = ipc_info.props; + return UMF_RESULT_SUCCESS; + } + + LOG_ERR("Failed to get memory properties handle for ptr=%p", ptr); + return ret; +} + +umf_result_t +umfGetMemoryPropertySize(umf_memory_properties_handle_t props_handle, + umf_memory_property_id_t memory_property_id, + size_t *size) { + UMF_CHECK((size != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_INVALID: + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + case UMF_MEMORY_PROPERTY_PROVIDER_HANDLE: + *size = sizeof(umf_memory_provider_handle_t); + return UMF_RESULT_SUCCESS; + case UMF_MEMORY_PROPERTY_POOL_HANDLE: + *size = sizeof(umf_memory_pool_handle_t); + return UMF_RESULT_SUCCESS; + case UMF_MEMORY_PROPERTY_BASE_ADDRESS: + *size = sizeof(uintptr_t); + return UMF_RESULT_SUCCESS; + case UMF_MEMORY_PROPERTY_BASE_SIZE: + *size = sizeof(size_t); + return UMF_RESULT_SUCCESS; + case UMF_MEMORY_PROPERTY_BUFFER_ID: + *size = sizeof(uint64_t); + return UMF_RESULT_SUCCESS; + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + *size = sizeof(umf_usm_memory_type_t); + return UMF_RESULT_SUCCESS; + default: + break; + } + + // custom memory properties should be handled by the user provider + umf_memory_provider_t *provider = props_handle->provider; + if (provider->ops.ext_get_allocation_properties_size) { + return provider->ops.ext_get_allocation_properties_size( + provider->provider_priv, memory_property_id, size); + } + + LOG_ERR("Unknown memory property ID: %d", memory_property_id); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + +umf_result_t umfGetMemoryProperty(umf_memory_properties_handle_t props_handle, + umf_memory_property_id_t memory_property_id, + void *value, size_t max_property_size) { + UMF_CHECK((value != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((props_handle != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((max_property_size > 0), UMF_RESULT_ERROR_INVALID_ARGUMENT); + + umf_memory_provider_t *provider = props_handle->provider; + + size_t property_size = 0; + umf_result_t ret = umfGetMemoryPropertySize( + props_handle, memory_property_id, &property_size); + if (UNLIKELY(ret != UMF_RESULT_SUCCESS)) { + LOG_ERR("Failed to get memory property size for ID %d", + memory_property_id); + return ret; + } + + if (UNLIKELY(property_size > max_property_size)) { + LOG_ERR("Memory property size %zu exceeds max size %zu for ID %d", + property_size, max_property_size, memory_property_id); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_INVALID: + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + + case UMF_MEMORY_PROPERTY_POOL_HANDLE: + *(umf_memory_pool_handle_t *)value = props_handle->pool; + return UMF_RESULT_SUCCESS; + + case 
UMF_MEMORY_PROPERTY_PROVIDER_HANDLE: + *(umf_memory_provider_handle_t *)value = provider; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_BUFFER_ID: + *(uint64_t *)value = props_handle->id; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_BASE_ADDRESS: + *(uintptr_t *)value = (uintptr_t)props_handle->base; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_BASE_SIZE: + *(size_t *)value = props_handle->base_size; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + // NOTE: this property is "cached" in the props_handle but the value is + // determined by the memory provider and set during addition to the + // tracker. + *(umf_usm_memory_type_t *)value = props_handle->memory_type; + return UMF_RESULT_SUCCESS; + + // GPU Memory Provider specific properties - should be handled by the + // provider + case UMF_MEMORY_PROPERTY_CONTEXT: + case UMF_MEMORY_PROPERTY_DEVICE: + default: + break; + }; + + // custom memory properties should be handled by the user provider + if (provider->ops.ext_get_allocation_properties) { + return provider->ops.ext_get_allocation_properties( + provider->provider_priv, props_handle->base, memory_property_id, + value); + } + + LOG_ERR("Unknown memory property ID: %d", memory_property_id); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} diff --git a/src/memory_properties_internal.h b/src/memory_properties_internal.h new file mode 100644 index 0000000000..3f2d6ef3aa --- /dev/null +++ b/src/memory_properties_internal.h @@ -0,0 +1,45 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#ifndef UMF_MEMORY_PROPERTIES_INTERNAL_H +#define UMF_MEMORY_PROPERTIES_INTERNAL_H 1 + +#include + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct umf_memory_properties_t { + umf_memory_pool_handle_t pool; + umf_memory_provider_handle_t provider; + uint64_t id; + void *base; + size_t base_size; + umf_usm_memory_type_t memory_type; +} umf_memory_properties_t; + +umf_result_t umfMemoryProviderGetAllocationProperties( + umf_memory_provider_handle_t hProvider, const void *ptr, + umf_memory_property_id_t propertyId, void *property_value); + +umf_result_t umfMemoryProviderGetAllocationPropertiesSize( + umf_memory_provider_handle_t hProvider, umf_memory_property_id_t propertyId, + size_t *size); + +#ifdef __cplusplus +} +#endif + +#endif // UMF_MEMORY_PROPERTIES_INTERNAL_H diff --git a/src/memory_provider.c b/src/memory_provider.c index 8ed2a79dbb..413de848fc 100644 --- a/src/memory_provider.c +++ b/src/memory_provider.c @@ -8,19 +8,23 @@ */ #include +#include #include #include #include +#include #include #include #include "base_alloc.h" #include "base_alloc_global.h" +#include "ctl/ctl_defaults.h" #include "ctl/ctl_internal.h" #include "libumf.h" #include "memory_provider_internal.h" #include "utils_assert.h" +#include "utils_name.h" static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -30,8 +34,8 @@ static umf_result_t CTL_SUBTREE_HANDLER(CTL_NONAME, by_handle)( umf_memory_provider_handle_t hProvider = *(umf_memory_provider_handle_t *)indexes->arg; - hProvider->ops.ext_ctl(hProvider->provider_priv, /*unused*/ 0, extra_name, - arg, size, queryType, args); + hProvider->ops.ext_ctl(hProvider->provider_priv, source, extra_name, arg, + size, queryType, args); return 
UMF_RESULT_SUCCESS; } @@ -43,8 +47,26 @@ static umf_ctl_node_t CTL_NODE(by_handle)[] = { static const struct ctl_argument CTL_ARG(by_handle) = CTL_ARG_PTR; +static ctl_default_entry_t *provider_default_list = NULL; +static utils_mutex_t provider_default_mtx; +static UTIL_ONCE_FLAG mem_provider_ctl_initialized = UTIL_ONCE_FLAG_INIT; + +static void provider_ctl_init(void) { utils_mutex_init(&provider_default_mtx); } + +static umf_result_t CTL_SUBTREE_HANDLER(default)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes, const char *extra_name, + umf_ctl_query_type_t queryType, va_list args) { + (void)ctx; + (void)indexes; + (void)args; + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + return ctl_default_subtree(&provider_default_list, &provider_default_mtx, + source, arg, size, extra_name, queryType); +} + umf_ctl_node_t CTL_NODE(provider)[] = {CTL_CHILD_WITH_ARG(by_handle), - CTL_NODE_END}; + CTL_LEAF_SUBTREE(default), CTL_NODE_END}; static umf_result_t umfDefaultPurgeLazy(void *provider, void *ptr, size_t size) { @@ -130,6 +152,26 @@ umfDefaultCtlHandle(void *provider, umf_ctl_query_source_t operationType, (void)size; (void)queryType; (void)args; + // if given path is not supported implementation should return UMF_RESULT_ERROR_INVALID_CTL_PATH + return UMF_RESULT_ERROR_INVALID_CTL_PATH; +} + +static umf_result_t +umfDefaultGetAllocationProperties(void *provider, const void *ptr, + umf_memory_property_id_t propertyId, + void *propertyValue) { + (void)provider; + (void)ptr; + (void)propertyId; + (void)propertyValue; + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + +static umf_result_t umfDefaultGetAllocationPropertiesSize( + void *provider, umf_memory_property_id_t propertyId, size_t *size) { + (void)provider; + (void)propertyId; + (void)size; return UMF_RESULT_ERROR_NOT_SUPPORTED; } @@ -153,6 +195,15 @@ void assignOpsExtDefaults(umf_memory_provider_ops_t *ops) { if (!ops->ext_ctl) { ops->ext_ctl = umfDefaultCtlHandle; } + + if (!ops->ext_get_allocation_properties) { + ops->ext_get_allocation_properties = umfDefaultGetAllocationProperties; + } + + if (!ops->ext_get_allocation_properties_size) { + ops->ext_get_allocation_properties_size = + umfDefaultGetAllocationPropertiesSize; + } } void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { @@ -177,6 +228,29 @@ void assignOpsIpcDefaults(umf_memory_provider_ops_t *ops) { } } +static umf_result_t umfProviderPostInitialize(umf_memory_provider_ops_t *ops, + void *provider_priv, ...) 
{ + + // "post_initialize" ctl query is supported since version 1.1 + if (ops->version < UMF_MAKE_VERSION(1, 1)) { + LOG_DEBUG( + "\"post_initialize\" ctl query was not required for memory " + "provider version %d, but the implementation could not handle " + "errors properly in this case", + ops->version); + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } + + va_list args; + va_start(args, provider_priv); + umf_result_t ret = + ops->ext_ctl(provider_priv, CTL_QUERY_PROGRAMMATIC, "post_initialize", + NULL, 0, CTL_QUERY_RUNNABLE, args); + va_end(args); + + return ret; +} + #define CHECK_OP(ops, fn) \ if (!(ops)->fn) { \ LOG_ERR("missing function pointer: %s\n", #fn); \ @@ -213,6 +287,14 @@ static bool validateOps(const umf_memory_provider_ops_t *ops) { return false; } + if ((ops->ext_get_allocation_properties == NULL) != + (ops->ext_get_allocation_properties_size == NULL)) { + LOG_ERR("ext_get_allocation_properties and " + "ext_get_allocation_properties_size must be " + "both set or both NULL\n"); + return false; + } + return true; } @@ -220,14 +302,36 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, const void *params, umf_memory_provider_handle_t *hProvider) { libumfInit(); - if (!ops || !hProvider || !validateOps(ops)) { + if (!ops || !hProvider) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } + umf_memory_provider_ops_t compatible_ops; if (ops->version != UMF_PROVIDER_OPS_VERSION_CURRENT) { LOG_WARN("Memory Provider ops version \"%d\" is different than the " "current version \"%d\"", ops->version, UMF_PROVIDER_OPS_VERSION_CURRENT); + + // Create a new ops compatible structure with the current version + memset(&compatible_ops, 0, sizeof(compatible_ops)); + + if (UMF_MINOR_VERSION(ops->version) == 0) { + LOG_INFO("Detected 1.0 version of Memory Provider ops, " + "upgrading to current version"); + memcpy(&compatible_ops, ops, + offsetof(umf_memory_provider_ops_t, + ext_get_allocation_properties)); + } else { + LOG_ERR("Unsupported Memory Provider ops version: %d", + ops->version); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + + ops = &compatible_ops; + } + + if (!validateOps(ops)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; } umf_memory_provider_handle_t provider = @@ -250,6 +354,29 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops, provider->provider_priv = provider_priv; + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + const char *pname = NULL; + + ret = provider->ops.get_name(provider->provider_priv, &pname); + if (ret != UMF_RESULT_SUCCESS) { + LOG_ERR("Failed to get pool name"); + umf_ba_global_free(provider); + return ret; + } + + assert(pname != NULL); + utils_warn_invalid_name("Memory provider", pname); + + ctl_default_apply(provider_default_list, pname, provider->ops.ext_ctl, + provider->provider_priv); + + ret = umfProviderPostInitialize(&provider->ops, provider_priv); + if (ret != UMF_RESULT_SUCCESS && ret != UMF_RESULT_ERROR_INVALID_CTL_PATH) { + LOG_ERR("Failed to post-initialize provider"); + umf_ba_global_free(provider); + return ret; + } + *hProvider = provider; return UMF_RESULT_SUCCESS; @@ -499,3 +626,48 @@ umfMemoryProviderCloseIPCHandle(umf_memory_provider_handle_t hProvider, checkErrorAndSetLastProvider(res, hProvider); return res; } + +umf_result_t umfMemoryProviderGetAllocationProperties( + umf_memory_provider_handle_t hProvider, const void *ptr, + umf_memory_property_id_t propertyId, void *property_value) { + + UMF_CHECK((hProvider != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + 
UMF_CHECK((property_value != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((propertyId != UMF_MEMORY_PROPERTY_INVALID), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // NOTE: we do not check if the propertyId is below + // UMF_MEMORY_PROPERTY_MAX_RESERVED value, as the user could use a custom + // property ID that is above the reserved range + + umf_result_t res = hProvider->ops.ext_get_allocation_properties( + hProvider->provider_priv, ptr, propertyId, property_value); + + checkErrorAndSetLastProvider(res, hProvider); + return res; +} + +umf_result_t umfMemoryProviderGetAllocationPropertiesSize( + umf_memory_provider_handle_t hProvider, umf_memory_property_id_t propertyId, + size_t *size) { + + UMF_CHECK((hProvider != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((size != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT); + UMF_CHECK((propertyId != UMF_MEMORY_PROPERTY_INVALID), + UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // NOTE: we do not check if the propertyId is below + // UMF_MEMORY_PROPERTY_MAX_RESERVED value, as the user could use a custom + // property ID that is above the reserved range + + umf_result_t res = hProvider->ops.ext_get_allocation_properties_size( + hProvider->provider_priv, propertyId, size); + + checkErrorAndSetLastProvider(res, hProvider); + return res; +} + +void umfProviderCtlDefaultsDestroy(void) { + utils_init_once(&mem_provider_ctl_initialized, provider_ctl_init); + ctl_default_destroy(&provider_default_list, &provider_default_mtx); +} diff --git a/src/memory_provider_internal.h b/src/memory_provider_internal.h index 65ba5d41c7..b486377acd 100644 --- a/src/memory_provider_internal.h +++ b/src/memory_provider_internal.h @@ -28,6 +28,7 @@ typedef struct umf_memory_provider_t { void *umfMemoryProviderGetPriv(umf_memory_provider_handle_t hProvider); umf_memory_provider_handle_t *umfGetLastFailedMemoryProviderPtr(void); +void umfProviderCtlDefaultsDestroy(void); extern umf_ctl_node_t CTL_NODE(provider)[]; diff --git a/src/memspaces/memspace_highest_bandwidth.c b/src/memspaces/memspace_highest_bandwidth.c index 0790c406d2..7bdc6f9382 100644 --- a/src/memspaces/memspace_highest_bandwidth.c +++ b/src/memspaces/memspace_highest_bandwidth.c @@ -14,16 +14,15 @@ #include #include -// UMF_MEMSPACE_HIGHEST_BANDWIDTH requires HWLOC -// Additionally, it is currently unsupported on Win -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +// UMF_MEMSPACE_HIGHEST_BANDWIDTH is currently unsupported on Win +#if defined(_WIN32) umf_const_memspace_handle_t umfMemspaceHighestBandwidthGet(void) { // not supported return NULL; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "memspace_internal.h" @@ -119,4 +118,4 @@ umf_const_memspace_handle_t umfMemspaceHighestBandwidthGet(void) { return UMF_MEMSPACE_HIGHEST_BANDWIDTH; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#endif // !defined(_WIN32) diff --git a/src/memspaces/memspace_highest_capacity.c b/src/memspaces/memspace_highest_capacity.c index 36ef01b1c2..31d4ced518 100644 --- a/src/memspaces/memspace_highest_capacity.c +++ b/src/memspaces/memspace_highest_capacity.c @@ -13,16 +13,15 @@ #include #include -// UMF_MEMSPACE_HIGHEST_CAPACITY requires HWLOC -// Additionally, it is currently unsupported on Win -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +// UMF_MEMSPACE_HIGHEST_CAPACITY is currently unsupported on Win +#if defined(_WIN32) umf_const_memspace_handle_t umfMemspaceHighestCapacityGet(void) { // not supported return NULL; } -#else // 
!defined(_WIN32) && !defined(UMF_NO_HWLOC) +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "memspace_internal.h" @@ -87,4 +86,4 @@ umf_const_memspace_handle_t umfMemspaceHighestCapacityGet(void) { return UMF_MEMSPACE_HIGHEST_CAPACITY; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#endif // !defined(_WIN32) diff --git a/src/memspaces/memspace_host_all.c b/src/memspaces/memspace_host_all.c index 06a07a6603..5ff9e8edd1 100644 --- a/src/memspaces/memspace_host_all.c +++ b/src/memspaces/memspace_host_all.c @@ -13,16 +13,14 @@ #include #include -// UMF_MEMSPACE_HOST_ALL requires HWLOC -// Additionally, it is currently unsupported on Win - -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +// UMF_MEMSPACE_HOST_ALL is currently unsupported on Win +#if defined(_WIN32) umf_const_memspace_handle_t umfMemspaceHostAllGet(void) { // not supported return NULL; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "memspace_internal.h" @@ -108,4 +106,4 @@ umf_const_memspace_handle_t umfMemspaceHostAllGet(void) { return UMF_MEMSPACE_HOST_ALL; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#endif // !defined(_WIN32) diff --git a/src/memspaces/memspace_lowest_latency.c b/src/memspaces/memspace_lowest_latency.c index 8fc33ae41e..6e1780f2a3 100644 --- a/src/memspaces/memspace_lowest_latency.c +++ b/src/memspaces/memspace_lowest_latency.c @@ -14,16 +14,15 @@ #include #include -// UMF_MEMSPACE_LOWEST_LATENCY requires HWLOC -// Additionally, it is currently unsupported on Win -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +// UMF_MEMSPACE_LOWEST_LATENCY is currently unsupported on Win +#if defined(_WIN32) umf_const_memspace_handle_t umfMemspaceLowestLatencyGet(void) { // not supported return NULL; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "memspace_internal.h" @@ -119,4 +118,4 @@ umf_const_memspace_handle_t umfMemspaceLowestLatencyGet(void) { return UMF_MEMSPACE_LOWEST_LATENCY; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#endif // !defined(_WIN32) diff --git a/src/memspaces/memspace_numa.c b/src/memspaces/memspace_numa.c index 4ac420ff75..421d50fafa 100644 --- a/src/memspaces/memspace_numa.c +++ b/src/memspaces/memspace_numa.c @@ -12,9 +12,8 @@ #include #include -// umfMemspaceCreateFromNumaArray requires HWLOC -// Additionally, it is currently unsupported on Win -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +// umfMemspaceCreateFromNumaArray is currently unsupported on Win +#if defined(_WIN32) umf_result_t umfMemspaceCreateFromNumaArray(unsigned *nodeIds, size_t numIds, umf_memspace_handle_t *hMemspace) { (void)nodeIds; @@ -25,7 +24,7 @@ umf_result_t umfMemspaceCreateFromNumaArray(unsigned *nodeIds, size_t numIds, return UMF_RESULT_ERROR_NOT_SUPPORTED; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#else // !defined(_WIN32) #include "../memspace_internal.h" #include "../memtargets/memtarget_numa.h" @@ -76,4 +75,4 @@ umf_result_t umfMemspaceCreateFromNumaArray(unsigned *nodeIds, size_t numIds, return ret; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +#endif // !defined(_WIN32) diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index e5339376e5..e059bba455 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -27,12 +26,24 @@ #include "utils_log.h" #include "utils_math.h" -static char *DEFAULT_NAME = 
"disjoint"; +static const char *DEFAULT_NAME = "disjoint"; /* Disjoint pool CTL implementation */ struct ctl disjoint_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +umf_result_t disjoint_pool_post_initialize(void *ppPool); +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return disjoint_pool_post_initialize(ctx); +} + // Disable name ctl for 1.0 release #if 0 static umf_result_t CTL_READ_HANDLER(name)(void *ctx, @@ -44,6 +55,7 @@ static umf_result_t CTL_READ_HANDLER(name)(void *ctx, disjoint_pool_t *pool = (disjoint_pool_t *)ctx; if (arg == NULL) { + LOG_ERR("arg is NULL"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -64,6 +76,7 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx, (void)source, (void)indexes, (void)size; disjoint_pool_t *pool = (disjoint_pool_t *)ctx; if (arg == NULL) { + LOG_ERR("arg is NULL"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -73,6 +86,211 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx, return UMF_RESULT_SUCCESS; } #endif + +static const struct ctl_argument + CTL_ARG(slab_min_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument + CTL_ARG(max_poolable_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument CTL_ARG(capacity) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument + CTL_ARG(min_bucket_size) = CTL_ARG_UNSIGNED_LONG_LONG; +static const struct ctl_argument CTL_ARG(pool_trace) = CTL_ARG_INT; + +static umf_result_t +CTL_READ_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.slab_min_size; + return UMF_RESULT_SUCCESS; +} + +// indicates that param was overridden by CTL +enum { + DP_OVERRIDE_SLAB_MIN_SIZE = 1 << 0, + DP_OVERRIDE_MAX_POOLABLE_SIZE = 1 << 1, + DP_OVERRIDE_CAPACITY = 1 << 2, + DP_OVERRIDE_MIN_BUCKET_SIZE = 1 << 3, + DP_OVERRIDE_POOL_TRACE = 1 << 4, +}; + +static umf_result_t +CTL_WRITE_HANDLER(slab_min_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetSlabMinSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_SLAB_MIN_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(max_poolable_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.max_poolable_size; + 
return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(max_poolable_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetMaxPoolableSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_MAX_POOLABLE_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(capacity)(void *ctx, umf_ctl_query_source_t source, void *arg, + size_t size, umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.capacity; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(capacity)(void *ctx, umf_ctl_query_source_t source, void *arg, + size_t size, umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = umfDisjointPoolParamsSetCapacity(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_CAPACITY; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(min_bucket_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(size_t *)arg = pool->params.min_bucket_size; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(min_bucket_size)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + size_t value = *(size_t *)arg; + umf_result_t ret = + umfDisjointPoolParamsSetMinBucketSize(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_MIN_BUCKET_SIZE; + } + return ret; +} + +static umf_result_t +CTL_READ_HANDLER(pool_trace)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(int)) { + 
LOG_ERR("arg is NULL or size is not sizeof(int)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *(int *)arg = pool->params.pool_trace; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(pool_trace)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (pool->post_initialized) { + LOG_ERR("writing parameter after post_initialize is not allowed"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + if (arg == NULL || size != sizeof(int)) { + LOG_ERR("arg is NULL or size is not sizeof(int)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + int value = *(int *)arg; + umf_result_t ret = umfDisjointPoolParamsSetTrace(&pool->params, value); + if (ret == UMF_RESULT_SUCCESS) { + pool->params_overridden |= DP_OVERRIDE_POOL_TRACE; + } + return ret; +} + +static const umf_ctl_node_t CTL_NODE(params)[] = { + CTL_LEAF_RW(slab_min_size), CTL_LEAF_RW(max_poolable_size), + CTL_LEAF_RW(capacity), CTL_LEAF_RW(min_bucket_size), + CTL_LEAF_RW(pool_trace), CTL_NODE_END, +}; static umf_result_t CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, @@ -81,6 +299,7 @@ CTL_READ_HANDLER(used_memory)(void *ctx, umf_ctl_query_source_t source, disjoint_pool_t *pool = (disjoint_pool_t *)ctx; if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -119,6 +338,7 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source, disjoint_pool_t *pool = (disjoint_pool_t *)ctx; if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -148,23 +368,196 @@ CTL_READ_HANDLER(reserved_memory)(void *ctx, umf_ctl_query_source_t source, return UMF_RESULT_SUCCESS; } -static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(used_memory), - CTL_LEAF_RO(reserved_memory)}; +static umf_result_t CTL_READ_HANDLER(count)(void *ctx, + umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; + if (arg == NULL || size != sizeof(size_t)) { + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (*(size_t *)indexes->arg != SIZE_MAX) { + LOG_ERR("to read buckets' count, you must call it without bucket id"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + assert(pool); + *(size_t *)arg = pool->buckets_num; + + return UMF_RESULT_SUCCESS; +} + +#define DEFINE_STATS_HANDLER(NAME, MEMBER) \ + static umf_result_t CTL_READ_HANDLER(NAME)( \ + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \ + umf_ctl_index_utlist_t *indexes) { \ + (void)source; \ + (void)indexes; \ + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \ + \ + if (arg == NULL || size != sizeof(size_t)) { \ + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \ + return UMF_RESULT_ERROR_INVALID_ARGUMENT; \ + } \ + \ + if (!pool->params.pool_trace) { \ + LOG_ERR("pool trace is disabled, cannot read " #NAME); \ + return UMF_RESULT_ERROR_NOT_SUPPORTED; \ + } \ + \ + size_t total = 0; \ + for (size_t i = 0; i < pool->buckets_num; ++i) { \ + bucket_t *bucket = pool->buckets[i]; \ + utils_mutex_lock(&bucket->bucket_lock); \ + total += bucket->MEMBER; \ + utils_mutex_unlock(&bucket->bucket_lock); \ + } \ + \ + *(size_t *)arg 
= total; \ + return UMF_RESULT_SUCCESS; \ + } + +DEFINE_STATS_HANDLER(alloc_num, alloc_count) +DEFINE_STATS_HANDLER(alloc_pool_num, alloc_pool_count) +DEFINE_STATS_HANDLER(free_num, free_count) +DEFINE_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use) +DEFINE_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool) +DEFINE_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use) +DEFINE_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool) + +static const umf_ctl_node_t CTL_NODE(stats)[] = { + CTL_LEAF_RO(used_memory), CTL_LEAF_RO(reserved_memory), + CTL_LEAF_RO(alloc_num), CTL_LEAF_RO(alloc_pool_num), + CTL_LEAF_RO(free_num), CTL_LEAF_RO(curr_slabs_in_use), + CTL_LEAF_RO(curr_slabs_in_pool), CTL_LEAF_RO(max_slabs_in_use), + CTL_LEAF_RO(max_slabs_in_pool), CTL_NODE_END, +}; + +#undef DEFINE_STATS_HANDLER + +#ifdef UMF_DEVELOPER_MODE +#define VALIDATE_BUCKETS_NAME(indexes) \ + if (strcmp("buckets", indexes->name) != 0) { \ + return UMF_RESULT_ERROR_INVALID_ARGUMENT; \ + } +#else +#define VALIDATE_BUCKETS_NAME(indexes) \ + do { \ + } while (0); +#endif + +#define DEFINE_BUCKET_STATS_HANDLER(NAME, MEMBER) \ + static umf_result_t CTL_READ_HANDLER(NAME, perBucket)( \ + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, \ + umf_ctl_index_utlist_t *indexes) { \ + (void)source; \ + \ + disjoint_pool_t *pool = (disjoint_pool_t *)ctx; \ + if (arg == NULL || size != sizeof(size_t)) { \ + LOG_ERR("arg is NULL or size is not sizeof(size_t)"); \ + return UMF_RESULT_ERROR_INVALID_ARGUMENT; \ + } \ + \ + VALIDATE_BUCKETS_NAME(indexes); \ + if (strcmp(#MEMBER, "size") != 0 && !pool->params.pool_trace) { \ + LOG_ERR("pool trace is disabled, cannot read " #NAME); \ + return UMF_RESULT_ERROR_NOT_SUPPORTED; \ + } \ + \ + size_t idx; \ + idx = *(size_t *)indexes->arg; \ + \ + if (idx >= pool->buckets_num) { \ + LOG_ERR("bucket id %zu is out of range [0, %zu)", idx, \ + pool->buckets_num); \ + return UMF_RESULT_ERROR_INVALID_ARGUMENT; \ + } \ + \ + bucket_t *bucket = pool->buckets[idx]; \ + utils_mutex_lock(&bucket->bucket_lock); \ + *(size_t *)arg = bucket->MEMBER; \ + utils_mutex_unlock(&bucket->bucket_lock); \ + \ + return UMF_RESULT_SUCCESS; \ + } + +DEFINE_BUCKET_STATS_HANDLER(alloc_num, alloc_count) +DEFINE_BUCKET_STATS_HANDLER(alloc_pool_num, alloc_pool_count) +DEFINE_BUCKET_STATS_HANDLER(free_num, free_count) +DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_use, curr_slabs_in_use) +DEFINE_BUCKET_STATS_HANDLER(curr_slabs_in_pool, curr_slabs_in_pool) +DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_use, max_slabs_in_use) +DEFINE_BUCKET_STATS_HANDLER(max_slabs_in_pool, max_slabs_in_pool) + +static const umf_ctl_node_t CTL_NODE(stats, perBucket)[] = { + CTL_LEAF_RO(alloc_num, perBucket), + CTL_LEAF_RO(alloc_pool_num, perBucket), + CTL_LEAF_RO(free_num, perBucket), + CTL_LEAF_RO(curr_slabs_in_use, perBucket), + CTL_LEAF_RO(curr_slabs_in_pool, perBucket), + CTL_LEAF_RO(max_slabs_in_use, perBucket), + CTL_LEAF_RO(max_slabs_in_pool, perBucket), + CTL_NODE_END, +}; + +// Not a counter; but it is read exactly like other per-bucket stats, so we can use macro. 
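For orientation, a short summary of the per-bucket CTL namespace created by the node tables above is sketched below as a comment. The leaf names are taken directly from this hunk; the exact path spelling expected by the public CTL query entry points is not part of this hunk and should be checked against the UMF CTL documentation.

```c
/* CTL namespace added for the disjoint pool by the nodes above
 * (every value is read as size_t):
 *
 *   buckets.count            - number of buckets; valid only without a bucket id
 *   buckets.<id>.size        - allocation size served by bucket <id>
 *   buckets.<id>.stats.*     - alloc_num, alloc_pool_num, free_num,
 *                              curr_slabs_in_use, curr_slabs_in_pool,
 *                              max_slabs_in_use, max_slabs_in_pool;
 *                              these return UMF_RESULT_ERROR_NOT_SUPPORTED
 *                              unless pool_trace is enabled
 *
 * The pool-wide stats.* leaves (used_memory, reserved_memory plus the same
 * counters summed over all buckets) remain available alongside this.
 */
```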
+DEFINE_BUCKET_STATS_HANDLER(size, size) + +#undef DEFINE_BUCKET_STATS_HANDLER + +static const umf_ctl_node_t CTL_NODE(buckets)[] = { + CTL_LEAF_RO(count), CTL_LEAF_RO(size, perBucket), + CTL_CHILD(stats, perBucket), CTL_NODE_END}; + +static int bucket_id_parser(const void *arg, void *dest, size_t dest_size) { + size_t *out = (size_t *)dest; + assert(out); + + if (arg == NULL) { + *out = SIZE_MAX; + return 1; // node n + } + + int ret = ctl_arg_unsigned(arg, dest, dest_size); + if (ret) { + *out = SIZE_MAX; + return 1; + } + + return 0; +} + +static const struct ctl_argument CTL_ARG(buckets) = { + sizeof(size_t), + {{0, sizeof(size_t), CTL_ARG_TYPE_UNSIGNED_LONG_LONG, bucket_id_parser}, + CTL_ARG_PARSER_END}}; static void initialize_disjoint_ctl(void) { + CTL_REGISTER_MODULE(&disjoint_ctl_root, params); CTL_REGISTER_MODULE(&disjoint_ctl_root, stats); - // CTL_REGISTER_MODULE(&disjoint_ctl_root, name); + CTL_REGISTER_MODULE(&disjoint_ctl_root, buckets); + // TODO: this is hack. Need some way to register module as node with argument + disjoint_ctl_root.root[disjoint_ctl_root.first_free - 1].arg = + &CTL_ARG(buckets); + disjoint_ctl_root.root[disjoint_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } umf_result_t disjoint_pool_ctl(void *hPool, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, umf_ctl_query_type_t queryType, va_list args) { - (void)operationType; utils_init_once(&ctl_initialized, initialize_disjoint_ctl); - return ctl_query(&disjoint_ctl_root, hPool, CTL_QUERY_PROGRAMMATIC, name, - queryType, arg, size, args); + return ctl_query(&disjoint_ctl_root, hPool, operationType, name, queryType, + arg, size, args); } // Temporary solution for disabling memory poisoning. 
This is needed because @@ -757,6 +1150,41 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, disjoint_pool->provider = provider; disjoint_pool->params = *dp_params; + disjoint_pool->post_initialized = false; + disjoint_pool->params_overridden = 0; + + *ppPool = (void *)disjoint_pool; + + return UMF_RESULT_SUCCESS; +} + +umf_result_t disjoint_pool_post_initialize(void *ppPool) { + disjoint_pool_t *disjoint_pool = (disjoint_pool_t *)ppPool; + + disjoint_pool->post_initialized = true; + + if (disjoint_pool->params_overridden) { + if (disjoint_pool->params_overridden & DP_OVERRIDE_SLAB_MIN_SIZE) { + LOG_INFO("CTL override: slab_min_size=%zu", + disjoint_pool->params.slab_min_size); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_MAX_POOLABLE_SIZE) { + LOG_INFO("CTL override: max_poolable_size=%zu", + disjoint_pool->params.max_poolable_size); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_CAPACITY) { + LOG_INFO("CTL override: capacity=%zu", + disjoint_pool->params.capacity); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_MIN_BUCKET_SIZE) { + LOG_INFO("CTL override: min_bucket_size=%zu", + disjoint_pool->params.min_bucket_size); + } + if (disjoint_pool->params_overridden & DP_OVERRIDE_POOL_TRACE) { + LOG_INFO("CTL override: pool_trace=%d", + disjoint_pool->params.pool_trace); + } + } disjoint_pool->known_slabs = critnib_new(free_slab, NULL); if (disjoint_pool->known_slabs == NULL) { @@ -816,13 +1244,11 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, } umf_result_t ret = umfMemoryProviderGetMinPageSize( - provider, NULL, &disjoint_pool->provider_min_page_size); + disjoint_pool->provider, NULL, &disjoint_pool->provider_min_page_size); if (ret != UMF_RESULT_SUCCESS) { disjoint_pool->provider_min_page_size = 0; } - *ppPool = (void *)disjoint_pool; - return UMF_RESULT_SUCCESS; err_free_buckets: @@ -841,7 +1267,6 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider, err_free_disjoint_pool: umf_ba_global_free(disjoint_pool); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } @@ -982,14 +1407,19 @@ umf_result_t disjoint_pool_malloc_usable_size(void *pool, const void *ptr, critnib_release(disjoint_pool->known_slabs, ref_slab); } - umf_alloc_info_t allocInfo = {NULL, 0, NULL}; - umf_result_t ret = umfMemoryTrackerGetAllocInfo(ptr, &allocInfo); - if (ret != UMF_RESULT_SUCCESS) { - *size = 0; - return ret; + umf_memory_properties_handle_t props = NULL; + umf_result_t umf_result = umfGetMemoryPropertiesHandle(ptr, &props); + if (umf_result != UMF_RESULT_SUCCESS) { + return umf_result; + } + + if (props == NULL) { + TLS_last_allocation_error = UMF_RESULT_ERROR_UNKNOWN; + LOG_ERR("failed to get allocation info from the memory tracker"); + return UMF_RESULT_ERROR_UNKNOWN; } - *size = allocInfo.baseSize; + *size = props->base_size; return UMF_RESULT_SUCCESS; } @@ -1025,15 +1455,21 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) { critnib_release(disjoint_pool->known_slabs, ref_slab); } - umf_alloc_info_t allocInfo = {NULL, 0, NULL}; - umf_result_t ret = umfMemoryTrackerGetAllocInfo(ptr, &allocInfo); + umf_memory_properties_handle_t props = NULL; + umf_result_t ret = umfGetMemoryPropertiesHandle(ptr, &props); if (ret != UMF_RESULT_SUCCESS) { TLS_last_allocation_error = ret; LOG_ERR("failed to get allocation info from the memory tracker"); return ret; } - size_t size = allocInfo.baseSize; + if (props == NULL) { + TLS_last_allocation_error = UMF_RESULT_ERROR_UNKNOWN; + LOG_ERR("failed to get 
allocation info from the memory tracker"); + return UMF_RESULT_ERROR_UNKNOWN; + } + + size_t size = props->base_size; umf_memory_provider_handle_t provider = disjoint_pool->provider; ret = umfMemoryProviderFree(provider, ptr, size); if (ret != UMF_RESULT_SUCCESS) { @@ -1133,6 +1569,47 @@ static umf_result_t disjoint_pool_get_name(void *pool, const char **name) { return UMF_RESULT_SUCCESS; } +umf_result_t disjoint_pool_trim_memory(void *pool, size_t minBytesToKeep) { + assert(pool != NULL); + disjoint_pool_t *hPool = (disjoint_pool_t *)pool; + + for (size_t i = 0; i < hPool->buckets_num; i++) { + bucket_t *bucket = hPool->buckets[i]; + utils_mutex_lock(&bucket->bucket_lock); + + // remove empty slabs from the pool + slab_list_item_t *it = NULL, *tmp = NULL; + LL_FOREACH_SAFE(bucket->available_slabs, it, tmp) { + slab_t *slab = it->val; + if (slab->num_chunks_allocated == 0) { + if (minBytesToKeep > 0) { + // if we still have bytes to keep, do not remove slab + if (minBytesToKeep > slab->slab_size) { + minBytesToKeep -= slab->slab_size; + } else { + minBytesToKeep = 0; + } + continue; + } + + // remove slab + destroy_slab(slab); + DL_DELETE(bucket->available_slabs, it); + assert(bucket->available_slabs_num > 0); + bucket->available_slabs_num--; + pool_unregister_slab(hPool, slab); + + // update stats + bucket_update_stats(bucket, 0, -1); + } + } + + utils_mutex_unlock(&bucket->bucket_lock); + } + + return UMF_RESULT_SUCCESS; +} + static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = { .version = UMF_POOL_OPS_VERSION_CURRENT, .initialize = disjoint_pool_initialize, @@ -1146,6 +1623,7 @@ static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS = { .get_last_allocation_error = disjoint_pool_get_last_allocation_error, .get_name = disjoint_pool_get_name, .ext_ctl = disjoint_pool_ctl, + .ext_trim_memory = disjoint_pool_trim_memory, }; const umf_memory_pool_ops_t *umfDisjointPoolOps(void) { diff --git a/src/pool/pool_disjoint_internal.h b/src/pool/pool_disjoint_internal.h index e52a0f4969..4d934479e0 100644 --- a/src/pool/pool_disjoint_internal.h +++ b/src/pool/pool_disjoint_internal.h @@ -159,6 +159,12 @@ typedef struct disjoint_pool_t { // Coarse-grain allocation min alignment size_t provider_min_page_size; + + // true after post_initialize was successfully called + bool post_initialized; + + // bitmask of parameters overridden via CTL + unsigned params_overridden; } disjoint_pool_t; static inline void slab_set_chunk_bit(slab_t *slab, size_t index, bool value) { diff --git a/src/pool/pool_jemalloc.c b/src/pool/pool_jemalloc.c index 343b30a28d..50a245e05d 100644 --- a/src/pool/pool_jemalloc.c +++ b/src/pool/pool_jemalloc.c @@ -12,6 +12,7 @@ #include #include "base_alloc_global.h" +#include "ctl/ctl_internal.h" #include "memory_provider_internal.h" #include "provider_tracking.h" #include "utils_common.h" @@ -46,26 +47,41 @@ umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name) { + (void)hParams; // unused + (void)name; // unused + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + #else #include #define MALLOCX_ARENA_MAX (MALLCTL_ARENAS_ALL - 1) +#define DEFAULT_NAME "jemalloc" typedef struct umf_jemalloc_pool_params_t { size_t n_arenas; + char name[64]; } umf_jemalloc_pool_params_t; typedef struct jemalloc_memory_pool_t { umf_memory_provider_handle_t provider; + umf_jemalloc_pool_params_t params; size_t n_arenas; - unsigned int arena_index[]; 
+ char name[64]; + unsigned int *arena_index; } jemalloc_memory_pool_t; static __TLS umf_result_t TLS_last_allocation_error; static jemalloc_memory_pool_t *pool_by_arena_index[MALLCTL_ARENAS_ALL]; +struct ctl jemalloc_ctl_root; +static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; + static jemalloc_memory_pool_t *get_pool_by_arena_index(unsigned arena_ind) { // there is no way to obtain MALLOCX_ARENA_MAX from jemalloc // so this checks if arena_ind does not exceed assumed range @@ -117,7 +133,7 @@ static void *arena_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, #ifndef __SANITIZE_ADDRESS__ // jemalloc might write to new extents in realloc, so we cannot - // mark them as unaccessible under asan + // mark them as inaccessible under asan utils_annotate_memory_inaccessible(ptr, size); #endif @@ -472,11 +488,35 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, return ret; } + jemalloc_memory_pool_t *pool = umf_ba_global_alloc(sizeof(*pool)); + if (!pool) { + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + memset(pool, 0, sizeof(*pool)); + + pool->provider = provider; + if (params) { + pool->params = *(const umf_jemalloc_pool_params_t *)params; + } else { + // Set default values + memset(&pool->params, 0, sizeof(pool->params)); + strncpy(pool->params.name, DEFAULT_NAME, sizeof(pool->params.name) - 1); + } + + *out_pool = pool; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t op_post_initialize(void *pool) { + assert(pool); + extent_hooks_t *pHooks = &arena_extent_hooks; size_t unsigned_size = sizeof(unsigned); int n_arenas_set_from_params = 0; + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; int err; - const umf_jemalloc_pool_params_t *jemalloc_params = params; + const umf_jemalloc_pool_params_t *jemalloc_params = &je_pool->params; size_t n_arenas = 0; if (jemalloc_params) { @@ -486,25 +526,34 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, if (n_arenas == 0) { n_arenas = utils_get_num_cores() * 4; - if (n_arenas > MALLOCX_ARENA_MAX) { - n_arenas = MALLOCX_ARENA_MAX; - } + n_arenas = utils_min(n_arenas, (size_t)MALLOCX_ARENA_MAX); } if (n_arenas > MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas %zu exceeds the limit (%i).", n_arenas, MALLOCX_ARENA_MAX); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - jemalloc_memory_pool_t *pool = umf_ba_global_alloc( - sizeof(*pool) + n_arenas * sizeof(*pool->arena_index)); - if (!pool) { + je_pool->arena_index = + umf_ba_global_alloc(n_arenas * sizeof(*je_pool->arena_index)); + if (!je_pool->arena_index) { + LOG_ERR("Could not allocate memory for arena indices."); + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - pool->provider = provider; - pool->n_arenas = n_arenas; + memset(je_pool->arena_index, 0, n_arenas * sizeof(*je_pool->arena_index)); + + const char *pool_name = DEFAULT_NAME; + if (jemalloc_params) { + pool_name = jemalloc_params->name; + } + + snprintf(je_pool->name, sizeof(je_pool->name), "%s", pool_name); + + je_pool->n_arenas = n_arenas; size_t num_created = 0; for (size_t i = 0; i < n_arenas; i++) { @@ -529,13 +578,13 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, break; } - pool->arena_index[num_created++] = arena_index; + je_pool->arena_index[num_created++] = arena_index; if (arena_index >= MALLOCX_ARENA_MAX) { LOG_ERR("Number of arenas exceeds the limit."); goto err_cleanup; } - pool_by_arena_index[arena_index] = pool; + pool_by_arena_index[arena_index] = 
je_pool; // Setup extent_hooks for the newly created arena. char cmd[64]; @@ -546,9 +595,8 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, goto err_cleanup; } } - *out_pool = (umf_memory_pool_handle_t)pool; - VALGRIND_DO_CREATE_MEMPOOL(pool, 0, 0); + VALGRIND_DO_CREATE_MEMPOOL(je_pool, 0, 0); return UMF_RESULT_SUCCESS; @@ -556,14 +604,49 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider, // Destroy any arenas that were successfully created. for (size_t i = 0; i < num_created; i++) { char cmd[64]; - unsigned arena = pool->arena_index[i]; + unsigned arena = je_pool->arena_index[i]; snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena); (void)je_mallctl(cmd, NULL, 0, NULL, 0); } - umf_ba_global_free(pool); + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + je_pool->arena_index = NULL; + } + umf_ba_global_free(je_pool); return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; } +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return op_post_initialize(ctx); +} + +static void initialize_jemalloc_ctl(void) { + jemalloc_ctl_root.root[jemalloc_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; +} + +static umf_result_t op_ctl(void *pool, umf_ctl_query_source_t operationType, + const char *name, void *arg, size_t size, + umf_ctl_query_type_t queryType, va_list args) { + (void)operationType; + (void)arg; + (void)size; + (void)args; + utils_init_once(&ctl_initialized, initialize_jemalloc_ctl); + return ctl_query(&jemalloc_ctl_root, pool, CTL_QUERY_PROGRAMMATIC, name, + queryType, arg, size, args); +} + static umf_result_t op_finalize(void *pool) { assert(pool); umf_result_t ret = UMF_RESULT_SUCCESS; @@ -577,6 +660,9 @@ static umf_result_t op_finalize(void *pool) { ret = UMF_RESULT_ERROR_UNKNOWN; } } + if (je_pool->arena_index) { + umf_ba_global_free(je_pool->arena_index); + } umf_ba_global_free(je_pool); VALGRIND_DO_DESTROY_MEMPOOL(pool); @@ -601,8 +687,37 @@ static umf_result_t op_get_last_allocation_error(void *pool) { } static umf_result_t op_get_name(void *pool, const char **name) { - (void)pool; - *name = "jemalloc"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (pool == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; + *name = je_pool->name; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t op_trim_memory(void *pool, size_t minBytesToKeep) { + // there is no way to tell jemalloc to keep a minimum number of bytes + // so we just purge all arenas + if (minBytesToKeep > 0) { + LOG_WARN("Ignoring minBytesToKeep (%zu) in jemalloc pool", + minBytesToKeep); + } + + jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool; + for (size_t i = 0; i < je_pool->n_arenas; i++) { + char cmd[64]; + unsigned arena = je_pool->arena_index[i]; + snprintf(cmd, sizeof(cmd), "arena.%u.purge", arena); + if (je_mallctl(cmd, NULL, NULL, NULL, 0)) { + LOG_ERR("Could not purge jemalloc arena %u", arena); + return UMF_RESULT_ERROR_UNKNOWN; + } + } + return UMF_RESULT_SUCCESS; } @@ -618,6 +733,8 @@ static umf_memory_pool_ops_t UMF_JEMALLOC_POOL_OPS = { .free = op_free, .get_last_allocation_error = op_get_last_allocation_error, .get_name = op_get_name, + 
.ext_ctl = op_ctl, + .ext_trim_memory = op_trim_memory, }; const umf_memory_pool_ops_t *umfJemallocPoolOps(void) { @@ -631,6 +748,8 @@ umfJemallocPoolParamsCreate(umf_jemalloc_pool_params_handle_t *hParams) { return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } memset(params, 0, sizeof(*params)); + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; return UMF_RESULT_SUCCESS; } @@ -652,4 +771,23 @@ umfJemallocPoolParamsSetNumArenas(umf_jemalloc_pool_params_handle_t hParams, return UMF_RESULT_SUCCESS; } +umf_result_t +umfJemallocPoolParamsSetName(umf_jemalloc_pool_params_handle_t hParams, + const char *name) { + if (!hParams) { + LOG_ERR("jemalloc pool params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + #endif /* UMF_POOL_JEMALLOC_ENABLED */ diff --git a/src/pool/pool_proxy.c b/src/pool/pool_proxy.c index c6bf74124f..6e256c4918 100644 --- a/src/pool/pool_proxy.c +++ b/src/pool/pool_proxy.c @@ -100,11 +100,22 @@ static umf_result_t proxy_free(void *pool, void *ptr) { struct proxy_memory_pool *hPool = (struct proxy_memory_pool *)pool; if (ptr) { - umf_alloc_info_t allocInfo = {NULL, 0, NULL}; - umf_result_t umf_result = umfMemoryTrackerGetAllocInfo(ptr, &allocInfo); - if (umf_result == UMF_RESULT_SUCCESS) { - size = allocInfo.baseSize; + umf_memory_properties_handle_t props = NULL; + umf_result_t umf_result = umfGetMemoryPropertiesHandle(ptr, &props); + + if (umf_result != UMF_RESULT_SUCCESS) { + TLS_last_allocation_error = umf_result; + LOG_ERR("failed to get allocation info from the memory tracker"); + return umf_result; + } + + if (props == NULL) { + TLS_last_allocation_error = UMF_RESULT_ERROR_UNKNOWN; + LOG_ERR("failed to get allocation info from the memory tracker"); + return UMF_RESULT_ERROR_UNKNOWN; } + + size = props->base_size; } return umfMemoryProviderFree(hPool->hProvider, ptr, size); @@ -147,7 +158,9 @@ static umf_memory_pool_ops_t UMF_PROXY_POOL_OPS = { .malloc_usable_size = proxy_malloc_usable_size, .free = proxy_free, .get_last_allocation_error = proxy_get_last_allocation_error, - .get_name = proxy_get_name}; + .get_name = proxy_get_name, + .ext_trim_memory = NULL, // not supported +}; const umf_memory_pool_ops_t *umfProxyPoolOps(void) { return &UMF_PROXY_POOL_OPS; diff --git a/src/pool/pool_scalable.c b/src/pool/pool_scalable.c index 982a3408dc..b5211615b8 100644 --- a/src/pool/pool_scalable.c +++ b/src/pool/pool_scalable.c @@ -36,6 +36,7 @@ static __TLS umf_result_t TLS_last_allocation_error; static __TLS umf_result_t TLS_last_free_error; static const size_t DEFAULT_GRANULARITY = 2 * 1024 * 1024; // 2MB +static const char *DEFAULT_NAME = "scalable"; typedef struct tbb_mem_pool_policy_t { raw_alloc_tbb_type pAlloc; @@ -48,6 +49,7 @@ typedef struct tbb_mem_pool_policy_t { typedef struct umf_scalable_pool_params_t { size_t granularity; bool keep_all_memory; + char name[64]; } umf_scalable_pool_params_t; typedef struct tbb_callbacks_t { @@ -69,7 +71,9 @@ typedef struct tbb_callbacks_t { typedef struct tbb_memory_pool_t { umf_memory_provider_handle_t mem_provider; + umf_scalable_pool_params_t params; void *tbb_pool; + char name[64]; } tbb_memory_pool_t; typedef enum tbb_enums_t { @@ -216,6 +220,8 @@ 
umfScalablePoolParamsCreate(umf_scalable_pool_params_handle_t *hParams) { params_data->granularity = DEFAULT_GRANULARITY; params_data->keep_all_memory = false; + strncpy(params_data->name, DEFAULT_NAME, sizeof(params_data->name) - 1); + params_data->name[sizeof(params_data->name) - 1] = '\0'; *hParams = (umf_scalable_pool_params_handle_t)params_data; @@ -265,8 +271,54 @@ umfScalablePoolParamsSetKeepAllMemory(umf_scalable_pool_params_handle_t hParams, return UMF_RESULT_SUCCESS; } +umf_result_t +umfScalablePoolParamsSetName(umf_scalable_pool_params_handle_t hParams, + const char *name) { + if (!hParams) { + LOG_ERR("scalable pool params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, const void *params, void **pool) { + tbb_memory_pool_t *pool_data = + umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); + if (!pool_data) { + LOG_ERR("cannot allocate memory for metadata"); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + + memset(pool_data, 0, sizeof(*pool_data)); + pool_data->mem_provider = provider; + + if (params) { + pool_data->params = *(const umf_scalable_pool_params_t *)params; + } else { + // Set default values + memset(&pool_data->params, 0, sizeof(pool_data->params)); + pool_data->params.granularity = DEFAULT_GRANULARITY; + pool_data->params.keep_all_memory = false; + strncpy(pool_data->params.name, DEFAULT_NAME, + sizeof(pool_data->params.name) - 1); + } + + *pool = (void *)pool_data; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t tbb_pool_post_initialize(void *pool) { tbb_mem_pool_policy_t policy = {.pAlloc = tbb_raw_alloc_wrapper, .pFree = tbb_raw_free_wrapper, .granularity = DEFAULT_GRANULARITY, @@ -275,19 +327,17 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, .keep_all_memory = false, .reserved = 0}; - // If params is provided, override defaults - if (params) { - const umf_scalable_pool_params_t *scalable_params = params; - policy.granularity = scalable_params->granularity; - policy.keep_all_memory = scalable_params->keep_all_memory; - } + assert(pool); + tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool; - tbb_memory_pool_t *pool_data = - umf_ba_global_alloc(sizeof(tbb_memory_pool_t)); - if (!pool_data) { - LOG_ERR("cannot allocate memory for metadata"); - return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; - } + const umf_scalable_pool_params_t *scalable_params = &pool_data->params; + const char *pool_name = scalable_params->name; + + // Use stored params + policy.granularity = scalable_params->granularity; + policy.keep_all_memory = scalable_params->keep_all_memory; + + snprintf(pool_data->name, sizeof(pool_data->name), "%s", pool_name); umf_result_t res = UMF_RESULT_SUCCESS; int ret = init_tbb_callbacks(); @@ -297,7 +347,6 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - pool_data->mem_provider = provider; ret = tbb_callbacks.pool_create_v1((intptr_t)pool_data, &policy, &(pool_data->tbb_pool)); if (ret != 0 /* TBBMALLOC_OK */) { @@ -305,8 +354,6 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider, goto err_tbb_init; } - *pool = (void *)pool_data; - return res; err_tbb_init: @@ -422,19 +469,46 @@ static umf_result_t 
tbb_get_last_allocation_error(void *pool) { return TLS_last_allocation_error; } +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return tbb_pool_post_initialize(ctx); +} + +static void initialize_pool_ctl(void) { + pool_scallable_ctl_root.root[pool_scallable_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; +} + static umf_result_t pool_ctl(void *hPool, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, umf_ctl_query_type_t query_type, va_list args) { (void)operationType; // unused - umf_memory_pool_handle_t pool_provider = (umf_memory_pool_handle_t)hPool; - utils_init_once(&ctl_initialized, NULL); - return ctl_query(&pool_scallable_ctl_root, pool_provider->pool_priv, - CTL_QUERY_PROGRAMMATIC, name, query_type, arg, size, args); + + utils_init_once(&ctl_initialized, initialize_pool_ctl); + return ctl_query(&pool_scallable_ctl_root, hPool, CTL_QUERY_PROGRAMMATIC, + name, query_type, arg, size, args); } static umf_result_t scalable_get_name(void *pool, const char **name) { - (void)pool; // unused - *name = "scalable"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (pool == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool; + *name = pool_data->name; return UMF_RESULT_SUCCESS; } @@ -451,6 +525,7 @@ static umf_memory_pool_ops_t UMF_SCALABLE_POOL_OPS = { .get_last_allocation_error = tbb_get_last_allocation_error, .ext_ctl = pool_ctl, .get_name = scalable_get_name, + .ext_trim_memory = NULL, // not supported }; const umf_memory_pool_ops_t *umfScalablePoolOps(void) { diff --git a/src/provider/provider_ctl_stats_impl.h b/src/provider/provider_ctl_stats_impl.h index 6f1fdf910b..72ffa93fb4 100644 --- a/src/provider/provider_ctl_stats_impl.h +++ b/src/provider/provider_ctl_stats_impl.h @@ -74,7 +74,7 @@ static const umf_ctl_node_t CTL_NODE(peak_memory)[] = {CTL_LEAF_RUNNABLE(reset), static const umf_ctl_node_t CTL_NODE(stats)[] = { CTL_LEAF_RO(allocated_memory), CTL_LEAF_RO(peak_memory), - CTL_CHILD(peak_memory), CTL_LEAF_RUNNABLE(reset), CTL_NODE_END}; + CTL_CHILD(peak_memory), CTL_NODE_END}; static inline void provider_ctl_stats_alloc(CTL_PROVIDER_TYPE *provider, size_t size) { diff --git a/src/provider/provider_cuda.c b/src/provider/provider_cuda.c index 953876fc12..304452f2c1 100644 --- a/src/provider/provider_cuda.c +++ b/src/provider/provider_cuda.c @@ -12,6 +12,7 @@ #include #include +#include "memory_provider_internal.h" #include "provider_ctl_stats_type.h" #include "provider_cuda_internal.h" #include "utils_load_library.h" @@ -55,6 +56,7 @@ typedef struct cu_memory_provider_t { size_t min_alignment; unsigned int alloc_flags; ctl_stats_t stats; + char name[64]; } cu_memory_provider_t; #define CTL_PROVIDER_TYPE cu_memory_provider_t @@ -73,6 +75,7 @@ typedef struct umf_cuda_memory_provider_params_t { // Allocation flags for cuMemHostAlloc/cuMemAllocManaged unsigned int alloc_flags; + char name[64]; } umf_cuda_memory_provider_params_t; typedef struct cu_ops_t { @@ -106,12 +109,26 @@ static bool Init_cu_global_state_failed; struct ctl cu_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t cu_memory_provider_post_initialize(void *provider); 
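The CUDA provider follows the same two-phase initialization introduced for the other pools and providers in this patch: `*_initialize` only copies parameters, while the expensive driver queries move into a `post_initialize` callback exposed as a runnable CTL leaf. A minimal sketch of that wiring is shown below for a hypothetical provider; the `my_*` names are illustrative, while the CTL types and macros (`umf_ctl_node_t`, `CTL_NODE_LEAF`, `CTL_RUNNABLE_HANDLER`, `ctl_query`) are used exactly as in the surrounding hunks and come from the internal `ctl/ctl_internal.h` header.

```c
// Illustrative sketch only - mirrors the post_initialize registration used by
// the CUDA, file and fixed providers in this patch; my_* names are hypothetical.
static struct ctl my_ctl_root;
static UTIL_ONCE_FLAG my_ctl_initialized = UTIL_ONCE_FLAG_INIT;

static umf_result_t my_post_initialize(void *provider); // deferred heavy setup

static umf_result_t
CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source,
                                      void *arg, size_t size,
                                      umf_ctl_index_utlist_t *indexes) {
    (void)source;
    (void)arg;
    (void)size;
    (void)indexes;
    return my_post_initialize(ctx);
}

static void initialize_my_ctl(void) {
    // register "post_initialize" as a runnable leaf in the provider's CTL root
    my_ctl_root.root[my_ctl_root.first_free++] = (umf_ctl_node_t){
        .name = "post_initialize",
        .type = CTL_NODE_LEAF,
        .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize),
    };
}

static umf_result_t my_ctl(void *provider, umf_ctl_query_source_t operationType,
                           const char *name, void *arg, size_t size,
                           umf_ctl_query_type_t queryType, va_list args) {
    utils_init_once(&my_ctl_initialized, initialize_my_ctl);
    return ctl_query(&my_ctl_root, provider, operationType, name, queryType,
                     arg, size, args);
}
```

With this in place, the deferred setup can be triggered through the provider's `ext_ctl` entry point after creation, which is what the `post_initialize` leaves registered throughout this patch appear intended for.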
+static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return cu_memory_provider_post_initialize(ctx); +} + // forward decl needed for alloc static umf_result_t cu_memory_provider_free(void *provider, void *ptr, size_t bytes); #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "CUDA"; + typedef struct cu_last_native_error_t { CUresult native_error; char msg_buff[TLS_MSG_BUF_LEN]; @@ -143,6 +160,11 @@ static umf_result_t cu2umf_result(CUresult result) { static void initialize_cu_ctl(void) { CTL_REGISTER_MODULE(&cu_memory_ctl_root, stats); + cu_memory_ctl_root.root[cu_memory_ctl_root.first_free++] = (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static void init_cu_global_state(void) { @@ -248,6 +270,8 @@ umf_result_t umfCUDAMemoryProviderParamsCreate( params_data->memory_type = UMF_MEMORY_TYPE_UNKNOWN; params_data->alloc_flags = 0; + strncpy(params_data->name, DEFAULT_NAME, sizeof(params_data->name) - 1); + params_data->name[sizeof(params_data->name) - 1] = '\0'; *hParams = params_data; @@ -310,6 +334,24 @@ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( return UMF_RESULT_SUCCESS; } +umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name) { + if (!hParams) { + LOG_ERR("CUDA Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + static umf_result_t cu_memory_provider_initialize(const void *params, void **provider) { if (params == NULL) { @@ -345,15 +387,37 @@ static umf_result_t cu_memory_provider_initialize(const void *params, if (!cu_provider) { return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - memset(cu_provider, 0, sizeof(cu_memory_provider_t)); + memset(cu_provider, 0, sizeof(*cu_provider)); + snprintf(cu_provider->name, sizeof(cu_provider->name), "%s", + cu_params->name); + + cu_provider->context = cu_params->cuda_context_handle; + cu_provider->device = cu_params->cuda_device_handle; + cu_provider->memory_type = cu_params->memory_type; + cu_provider->alloc_flags = cu_params->alloc_flags; + + *provider = cu_provider; + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_finalize(void *provider) { + umf_ba_global_free(provider); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t cu_memory_provider_post_initialize(void *provider) { + cu_memory_provider_t *cu_provider = (cu_memory_provider_t *)provider; + + assert(provider); // CUDA alloc functions doesn't allow to provide user alignment - get the // minimum one from the driver size_t min_alignment = 0; CUmemAllocationProp allocProps = {0}; allocProps.location.type = CU_MEM_LOCATION_TYPE_DEVICE; allocProps.type = CU_MEM_ALLOCATION_TYPE_PINNED; - allocProps.location.id = cu_params->cuda_device_handle; + allocProps.location.id = cu_provider->device; CUresult cu_result = g_cu_ops.cuMemGetAllocationGranularity( &min_alignment, &allocProps, CU_MEM_ALLOC_GRANULARITY_MINIMUM); if (cu_result != CUDA_SUCCESS) { @@ -361,29 +425,16 @@ static umf_result_t cu_memory_provider_initialize(const void *params, return 
cu2umf_result(cu_result); } - cu_provider->context = cu_params->cuda_context_handle; - cu_provider->device = cu_params->cuda_device_handle; - cu_provider->memory_type = cu_params->memory_type; cu_provider->min_alignment = min_alignment; // If the memory type is shared (CUDA managed), the allocation flags must // be set. NOTE: we do not check here if the flags are valid - // this will be done by CUDA runtime. - if (cu_params->memory_type == UMF_MEMORY_TYPE_SHARED && - cu_params->alloc_flags == 0) { + if (cu_provider->memory_type == UMF_MEMORY_TYPE_SHARED && + cu_provider->alloc_flags == 0) { // the default setting is CU_MEM_ATTACH_GLOBAL cu_provider->alloc_flags = CU_MEM_ATTACH_GLOBAL; - } else { - cu_provider->alloc_flags = cu_params->alloc_flags; } - - *provider = cu_provider; - - return UMF_RESULT_SUCCESS; -} - -static umf_result_t cu_memory_provider_finalize(void *provider) { - umf_ba_global_free(provider); return UMF_RESULT_SUCCESS; } @@ -620,8 +671,15 @@ cu_memory_provider_get_recommended_page_size(void *provider, size_t size, static umf_result_t cu_memory_provider_get_name(void *provider, const char **name) { - (void)provider; - *name = "CUDA"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + cu_memory_provider_t *cu_provider = (cu_memory_provider_t *)provider; + *name = cu_provider->name; return UMF_RESULT_SUCCESS; } @@ -712,6 +770,61 @@ static umf_result_t cu_ctl(void *provider, umf_ctl_query_source_t operationType, query_type, arg, size, args); } +static umf_result_t cu_memory_provider_get_allocation_properties( + void *provider, const void *ptr, + umf_memory_property_id_t memory_property_id, void *value) { + + // unused + (void)ptr; + + cu_memory_provider_t *cuda_provider = (cu_memory_provider_t *)provider; + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + *(umf_usm_memory_type_t *)value = cuda_provider->memory_type; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_CONTEXT: + *(CUcontext *)value = cuda_provider->context; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_DEVICE: + *(CUdevice *)value = cuda_provider->device; + return UMF_RESULT_SUCCESS; + + default: + break; + }; + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + +static umf_result_t cu_memory_provider_get_allocation_properties_size( + void *provider, umf_memory_property_id_t memory_property_id, size_t *size) { + + // unused + (void)provider; + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + *size = sizeof(umf_usm_memory_type_t); + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_CONTEXT: + *size = sizeof(CUcontext); + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_DEVICE: + *size = sizeof(CUdevice); + return UMF_RESULT_SUCCESS; + + default: + break; + }; + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + static umf_memory_provider_ops_t UMF_CUDA_MEMORY_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = cu_memory_provider_initialize, @@ -735,6 +848,10 @@ static umf_memory_provider_ops_t UMF_CUDA_MEMORY_PROVIDER_OPS = { .ext_open_ipc_handle = cu_memory_provider_open_ipc_handle, .ext_close_ipc_handle = cu_memory_provider_close_ipc_handle, .ext_ctl = cu_ctl, + .ext_get_allocation_properties = + cu_memory_provider_get_allocation_properties, + .ext_get_allocation_properties_size = + cu_memory_provider_get_allocation_properties_size, }; const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void) { @@ -790,6 
+907,14 @@ umf_result_t umfCUDAMemoryProviderParamsSetAllocFlags( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfCUDAMemoryProviderParamsSetName( + umf_cuda_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + const umf_memory_provider_ops_t *umfCUDAMemoryProviderOps(void) { // not supported LOG_ERR("CUDA provider is disabled (UMF_BUILD_CUDA_PROVIDER is OFF)!"); diff --git a/src/provider/provider_devdax_memory.c b/src/provider/provider_devdax_memory.c index c495e48ac7..6addea3502 100644 --- a/src/provider/provider_devdax_memory.c +++ b/src/provider/provider_devdax_memory.c @@ -19,7 +19,7 @@ #include "utils_log.h" -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +#if defined(_WIN32) const umf_memory_provider_ops_t *umfDevDaxMemoryProviderOps(void) { // not supported @@ -62,7 +62,15 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( return UMF_RESULT_ERROR_NOT_SUPPORTED; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("DevDax memory provider is disabled!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "coarse.h" @@ -76,6 +84,8 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "DEVDAX"; + typedef struct devdax_memory_provider_t { char path[PATH_MAX]; // a path to the device DAX size_t size; // size of the file used for memory mapping @@ -85,6 +95,7 @@ typedef struct devdax_memory_provider_t { unsigned protection; // combination of OS-specific protection flags coarse_t *coarse; // coarse library handle ctl_stats_t stats; + char name[64]; } devdax_memory_provider_t; #define CTL_PROVIDER_TYPE devdax_memory_provider_t @@ -95,6 +106,7 @@ typedef struct umf_devdax_memory_provider_params_t { char *path; size_t size; unsigned protection; + char name[64]; } umf_devdax_memory_provider_params_t; typedef struct devdax_last_native_error_t { @@ -186,6 +198,8 @@ static umf_result_t devdax_initialize(const void *params, void **provider) { } memset(devdax_provider, 0, sizeof(*devdax_provider)); + snprintf(devdax_provider->name, sizeof(devdax_provider->name), "%s", + in_params->name); coarse_params_t coarse_params = {0}; coarse_params.provider = devdax_provider; @@ -381,8 +395,16 @@ static umf_result_t devdax_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t devdax_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "DEVDAX"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + devdax_memory_provider_t *devdax_provider = + (devdax_memory_provider_t *)provider; + *name = devdax_provider->name; return UMF_RESULT_SUCCESS; } @@ -586,7 +608,8 @@ static umf_memory_provider_ops_t UMF_DEVDAX_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = devdax_put_ipc_handle, .ext_open_ipc_handle = devdax_open_ipc_handle, .ext_close_ipc_handle = devdax_close_ipc_handle, - .ext_ctl = devdax_ctl}; + .ext_ctl = devdax_ctl, +}; const umf_memory_provider_ops_t *umfDevDaxMemoryProviderOps(void) { return &UMF_DEVDAX_MEMORY_PROVIDER_OPS; @@ -614,6 +637,9 @@ umf_result_t umfDevDaxMemoryProviderParamsCreate( 
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; + params->path = NULL; params->size = 0; params->protection = UMF_PROTECTION_READ | UMF_PROTECTION_WRITE; @@ -698,4 +724,22 @@ umf_result_t umfDevDaxMemoryProviderParamsSetProtection( return UMF_RESULT_SUCCESS; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +umf_result_t umfDevDaxMemoryProviderParamsSetName( + umf_devdax_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("DevDax Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + +#endif // !defined(_WIN32) diff --git a/src/provider/provider_file_memory.c b/src/provider/provider_file_memory.c index 210b90ceed..d666a2fd71 100644 --- a/src/provider/provider_file_memory.c +++ b/src/provider/provider_file_memory.c @@ -20,7 +20,7 @@ #include "utils_log.h" -#if defined(_WIN32) || defined(UMF_NO_HWLOC) +#if defined(_WIN32) const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void) { // not supported @@ -68,7 +68,15 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( return UMF_RESULT_ERROR_NOT_SUPPORTED; } -#else // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + LOG_ERR("File memory provider is disabled!"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + +#else // !defined(_WIN32) #include "base_alloc_global.h" #include "coarse.h" @@ -83,6 +91,8 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "FILE"; + typedef struct file_memory_provider_t { utils_mutex_t lock; // lock for file parameters (size and offsets) @@ -113,7 +123,9 @@ typedef struct file_memory_provider_t { critnib *fd_offset_map; coarse_t *coarse; // coarse library handle + ctl_stats_t stats; + char name[64]; } file_memory_provider_t; #define CTL_PROVIDER_TYPE file_memory_provider_t @@ -124,6 +136,7 @@ typedef struct umf_file_memory_provider_params_t { char *path; unsigned protection; umf_memory_visibility_t visibility; + char name[64]; } umf_file_memory_provider_params_t; typedef struct file_last_native_error_t { @@ -147,6 +160,18 @@ static __TLS file_last_native_error_t TLS_last_native_error; struct ctl file_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t file_post_initialize(void *provider); +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return file_post_initialize(ctx); +} + static const char *Native_error_str[] = { [_UMF_FILE_RESULT_SUCCESS] = "success", [_UMF_FILE_RESULT_ERROR_ALLOC_FAILED] = "memory allocation failed", @@ -162,6 +187,12 @@ static void file_store_last_native_error(int32_t native_error, static void initialize_file_ctl(void) { CTL_REGISTER_MODULE(&file_memory_ctl_root, stats); + file_memory_ctl_root.root[file_memory_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + 
.runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static umf_result_t @@ -218,6 +249,8 @@ static umf_result_t file_initialize(const void *params, void **provider) { } memset(file_provider, 0, sizeof(*file_provider)); + snprintf(file_provider->name, sizeof(file_provider->name), "%s", + in_params->name); ret = file_translate_params(in_params, file_provider); if (ret != UMF_RESULT_SUCCESS) { @@ -280,6 +313,22 @@ static umf_result_t file_initialize(const void *params, void **provider) { file_provider->coarse = coarse; + *provider = file_provider; + return UMF_RESULT_SUCCESS; + +err_close_fd: + utils_close_fd(file_provider->fd); +err_free_file_provider: + umf_ba_global_free(file_provider); + return ret; +} + +static umf_result_t file_post_initialize(void *provider) { + umf_result_t ret = UMF_RESULT_SUCCESS; + file_memory_provider_t *file_provider = provider; + + assert(provider); + if (utils_mutex_init(&file_provider->lock) == NULL) { LOG_ERR("lock init failed"); ret = UMF_RESULT_ERROR_UNKNOWN; @@ -300,8 +349,6 @@ static umf_result_t file_initialize(const void *params, void **provider) { goto err_delete_fd_offset_map; } - *provider = file_provider; - return UMF_RESULT_SUCCESS; err_delete_fd_offset_map: @@ -310,9 +357,9 @@ static umf_result_t file_initialize(const void *params, void **provider) { utils_mutex_destroy_not_free(&file_provider->lock); err_coarse_delete: coarse_delete(file_provider->coarse); -err_close_fd: - utils_close_fd(file_provider->fd); -err_free_file_provider: + if (utils_close_fd(file_provider->fd)) { + LOG_PERR("closing file descriptor %d failed", file_provider->fd); + } umf_ba_global_free(file_provider); return ret; } @@ -649,8 +696,15 @@ static umf_result_t file_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t file_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "FILE"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + file_memory_provider_t *file_provider = (file_memory_provider_t *)provider; + *name = file_provider->name; return UMF_RESULT_SUCCESS; } @@ -913,7 +967,8 @@ static umf_memory_provider_ops_t UMF_FILE_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = file_put_ipc_handle, .ext_open_ipc_handle = file_open_ipc_handle, .ext_close_ipc_handle = file_close_ipc_handle, - .ext_ctl = file_ctl}; + .ext_ctl = file_ctl, +}; const umf_memory_provider_ops_t *umfFileMemoryProviderOps(void) { return &UMF_FILE_MEMORY_PROVIDER_OPS; @@ -942,6 +997,8 @@ umf_result_t umfFileMemoryProviderParamsCreate( params->path = NULL; params->protection = UMF_PROTECTION_READ | UMF_PROTECTION_WRITE; params->visibility = UMF_MEM_MAP_PRIVATE; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; umf_result_t res = umfFileMemoryProviderParamsSetPath(params, path); if (res != UMF_RESULT_SUCCESS) { @@ -1023,4 +1080,22 @@ umf_result_t umfFileMemoryProviderParamsSetVisibility( return UMF_RESULT_SUCCESS; } -#endif // !defined(_WIN32) && !defined(UMF_NO_HWLOC) +umf_result_t umfFileMemoryProviderParamsSetName( + umf_file_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("File Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + 
hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + +#endif // !defined(_WIN32) diff --git a/src/provider/provider_fixed_memory.c b/src/provider/provider_fixed_memory.c index 08fd3e7f64..4e8f22e4fb 100644 --- a/src/provider/provider_fixed_memory.c +++ b/src/provider/provider_fixed_memory.c @@ -28,17 +28,21 @@ #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "FIXED"; + typedef struct fixed_memory_provider_t { void *base; // base address of memory size_t size; // size of the memory region coarse_t *coarse; // coarse library handle ctl_stats_t stats; + char name[64]; } fixed_memory_provider_t; // Fixed Memory provider settings struct typedef struct umf_fixed_memory_provider_params_t { void *ptr; size_t size; + char name[64]; } umf_fixed_memory_provider_params_t; typedef struct fixed_last_native_error_t { @@ -61,8 +65,26 @@ static __TLS fixed_last_native_error_t TLS_last_native_error; struct ctl fixed_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static umf_result_t fixed_post_initialize(void *provider); +static umf_result_t +CTL_RUNNABLE_HANDLER(post_initialize)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source; + (void)arg; + (void)size; + (void)indexes; + return fixed_post_initialize(ctx); +} + static void initialize_fixed_ctl(void) { CTL_REGISTER_MODULE(&fixed_memory_ctl_root, stats); + fixed_memory_ctl_root.root[fixed_memory_ctl_root.first_free++] = + (umf_ctl_node_t){ + .name = "post_initialize", + .type = CTL_NODE_LEAF, + .runnable_cb = CTL_RUNNABLE_HANDLER(post_initialize), + }; } static const char *Native_error_str[] = { @@ -110,6 +132,8 @@ static umf_result_t fixed_initialize(const void *params, void **provider) { } memset(fixed_provider, 0, sizeof(*fixed_provider)); + snprintf(fixed_provider->name, sizeof(fixed_provider->name), "%s", + in_params->name); coarse_params_t coarse_params = {0}; coarse_params.provider = fixed_provider; @@ -135,20 +159,10 @@ static umf_result_t fixed_initialize(const void *params, void **provider) { fixed_provider->base = in_params->ptr; fixed_provider->size = in_params->size; - // add the entire memory as a single block - ret = coarse_add_memory_fixed(coarse, fixed_provider->base, - fixed_provider->size); - if (ret != UMF_RESULT_SUCCESS) { - LOG_ERR("adding memory block failed"); - goto err_coarse_delete; - } - *provider = fixed_provider; return UMF_RESULT_SUCCESS; -err_coarse_delete: - coarse_delete(fixed_provider->coarse); err_free_fixed_provider: umf_ba_global_free(fixed_provider); return ret; @@ -251,8 +265,16 @@ static umf_result_t fixed_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t fixed_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "FIXED"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + fixed_memory_provider_t *fixed_provider = + (fixed_memory_provider_t *)provider; + *name = fixed_provider->name; return UMF_RESULT_SUCCESS; } @@ -283,6 +305,22 @@ static umf_result_t fixed_free(void *provider, void *ptr, size_t size) { return ret; } +static umf_result_t fixed_post_initialize(void *provider) { + fixed_memory_provider_t *fixed_provider = + (fixed_memory_provider_t *)provider; + assert(provider); + + umf_result_t ret = coarse_add_memory_fixed( + fixed_provider->coarse, fixed_provider->base, fixed_provider->size); + if (ret != 
UMF_RESULT_SUCCESS) { + LOG_ERR("adding memory block failed"); + coarse_delete(fixed_provider->coarse); + umf_ba_global_free(fixed_provider); + return ret; + } + return UMF_RESULT_SUCCESS; +} + static umf_result_t fixed_ctl(void *provider, umf_ctl_query_source_t operationType, const char *name, void *arg, size_t size, @@ -311,7 +349,8 @@ static umf_memory_provider_ops_t UMF_FIXED_MEMORY_PROVIDER_OPS = { .ext_put_ipc_handle = NULL, .ext_open_ipc_handle = NULL, .ext_close_ipc_handle = NULL, - .ext_ctl = fixed_ctl}; + .ext_ctl = fixed_ctl, +}; const umf_memory_provider_ops_t *umfFixedMemoryProviderOps(void) { return &UMF_FIXED_MEMORY_PROVIDER_OPS; @@ -333,6 +372,9 @@ umf_result_t umfFixedMemoryProviderParamsCreate( return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; + umf_result_t ret = umfFixedMemoryProviderParamsSetMemory(params, ptr, size); if (ret != UMF_RESULT_SUCCESS) { umf_ba_global_free(params); @@ -375,3 +417,21 @@ umf_result_t umfFixedMemoryProviderParamsSetMemory( hParams->size = size; return UMF_RESULT_SUCCESS; } + +umf_result_t umfFixedMemoryProviderParamsSetName( + umf_fixed_memory_provider_params_handle_t hParams, const char *name) { + if (hParams == NULL) { + LOG_ERR("Memory Provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} diff --git a/src/provider/provider_level_zero.c b/src/provider/provider_level_zero.c index d5e79b244d..7e9f322316 100644 --- a/src/provider/provider_level_zero.c +++ b/src/provider/provider_level_zero.c @@ -14,8 +14,10 @@ #include #include +#include "memory_provider_internal.h" #include "provider_ctl_stats_type.h" #include "provider_level_zero_internal.h" +#include "provider_tracking.h" #include "utils_load_library.h" #include "utils_log.h" @@ -32,6 +34,7 @@ void fini_ze_global_state(void) { #include "base_alloc_global.h" #include "libumf.h" +#include "provider_level_zero_internal.h" #include "utils_assert.h" #include "utils_common.h" #include "utils_concurrency.h" @@ -41,22 +44,29 @@ void fini_ze_global_state(void) { // Level Zero Memory Provider settings struct typedef struct umf_level_zero_memory_provider_params_t { - ze_context_handle_t - level_zero_context_handle; ///< Handle to the Level Zero context - ze_device_handle_t - level_zero_device_handle; ///< Handle to the Level Zero device + // Handle to the Level Zero context + ze_context_handle_t level_zero_context_handle; + + // Handle to the Level Zero device + ze_device_handle_t level_zero_device_handle; + + // Allocation memory type + umf_usm_memory_type_t memory_type; + + // Array of devices for which the memory should be made resident + ze_device_handle_t *resident_device_handles; - umf_usm_memory_type_t memory_type; ///< Allocation memory type + // Number of devices for which the memory should be made resident + uint32_t resident_device_count; - ze_device_handle_t * - resident_device_handles; ///< Array of devices for which the memory should be made resident - uint32_t - resident_device_count; ///< Number of devices for which the memory should be made resident + // Memory free policy + umf_level_zero_memory_provider_free_policy_t freePolicy; - umf_level_zero_memory_provider_free_policy_t - freePolicy; ///< Memory free 
policy + // Memory exchange policy 0 = IPC (default), 1 = import/export + int use_import_export_for_IPC; uint32_t device_ordinal; + char name[64]; } umf_level_zero_memory_provider_params_t; typedef struct ze_memory_provider_t { @@ -64,16 +74,22 @@ typedef struct ze_memory_provider_t { ze_device_handle_t device; ze_memory_type_t memory_type; + utils_rwlock_t resident_device_rwlock; ze_device_handle_t *resident_device_handles; uint32_t resident_device_count; + uint32_t resident_device_capacity; ze_device_properties_t device_properties; ze_driver_memory_free_policy_ext_flags_t freePolicyFlags; + // Memory exchange policy 0 = IPC (default), 1 = import/export + int use_import_export_for_IPC; + size_t min_page_size; uint32_t device_ordinal; + char name[64]; ctl_stats_t stats; } ze_memory_provider_t; @@ -81,14 +97,14 @@ typedef struct ze_memory_provider_t { typedef struct ze_ops_t { ze_result_t (*zeMemAllocHost)(ze_context_handle_t, const ze_host_mem_alloc_desc_t *, size_t, - size_t, void *); + size_t, void **); ze_result_t (*zeMemAllocDevice)(ze_context_handle_t, const ze_device_mem_alloc_desc_t *, size_t, - size_t, ze_device_handle_t, void *); + size_t, ze_device_handle_t, void **); ze_result_t (*zeMemAllocShared)(ze_context_handle_t, const ze_device_mem_alloc_desc_t *, const ze_host_mem_alloc_desc_t *, size_t, - size_t, ze_device_handle_t, void *); + size_t, ze_device_handle_t, void **); ze_result_t (*zeMemFree)(ze_context_handle_t, void *); ze_result_t (*zeMemGetIpcHandle)(ze_context_handle_t, const void *, ze_ipc_mem_handle_t *); @@ -100,6 +116,8 @@ typedef struct ze_ops_t { ze_result_t (*zeContextMakeMemoryResident)(ze_context_handle_t, ze_device_handle_t, void *, size_t); + ze_result_t (*zeContextEvictMemory)(ze_context_handle_t, ze_device_handle_t, + void *, size_t); ze_result_t (*zeDeviceGetProperties)(ze_device_handle_t, ze_device_properties_t *); ze_result_t (*zeMemFreeExt)(ze_context_handle_t, @@ -113,6 +131,7 @@ static ze_ops_t g_ze_ops; static UTIL_ONCE_FLAG ze_is_initialized = UTIL_ONCE_FLAG_INIT; static bool Init_ze_global_state_failed; static __TLS ze_result_t TLS_last_native_error; +static const char *DEFAULT_NAME = "LEVEL_ZERO"; static void store_last_native_error(int32_t native_error) { TLS_last_native_error = native_error; @@ -124,7 +143,56 @@ static void store_last_native_error(int32_t native_error) { struct ctl ze_memory_ctl_root; static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT; +static ze_relaxed_allocation_limits_exp_desc_t relaxed_device_allocation_desc = + {.stype = ZE_STRUCTURE_TYPE_RELAXED_ALLOCATION_LIMITS_EXP_DESC, + .pNext = NULL, + .flags = ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_MAX_SIZE}; + +static ze_external_memory_export_desc_t memory_export_desc = { + .stype = ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_DESC, + .pNext = NULL, + .flags = ZE_EXTERNAL_MEMORY_TYPE_FLAG_OPAQUE_WIN32}; + +static umf_result_t CTL_READ_HANDLER(use_import_export_for_IPC)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + (void)source, (void)indexes; + + if (arg == NULL || size != sizeof(int)) { + LOG_ERR("arg is NULL or size is not valid"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + int *arg_out = arg; + ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)ctx; + *arg_out = ze_provider->use_import_export_for_IPC; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t CTL_WRITE_HANDLER(use_import_export_for_IPC)( + void *ctx, umf_ctl_query_source_t source, void *arg, size_t size, + umf_ctl_index_utlist_t 
*indexes) { + (void)source, (void)indexes; + + if (arg == NULL || size != sizeof(int)) { + LOG_ERR("arg is NULL or size is not valid"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + int arg_in = *(int *)arg; + ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)ctx; + ze_provider->use_import_export_for_IPC = arg_in; + return UMF_RESULT_SUCCESS; +} + +static const struct ctl_argument + CTL_ARG(use_import_export_for_IPC) = CTL_ARG_INT; + +static const umf_ctl_node_t CTL_NODE(params)[] = { + CTL_LEAF_RW(use_import_export_for_IPC), CTL_NODE_END}; + static void initialize_ze_ctl(void) { + CTL_REGISTER_MODULE(&ze_memory_ctl_root, params); CTL_REGISTER_MODULE(&ze_memory_ctl_root, stats); } @@ -169,11 +237,18 @@ static ze_memory_type_t umf2ze_memory_type(umf_usm_memory_type_t memory_type) { } static void init_ze_global_state(void) { + + const char *lib_name = getenv("UMF_ZE_LOADER_LIB_NAME"); + if (lib_name != NULL && lib_name[0] != '\0') { + LOG_INFO("Using custom ze_loader library name: %s", lib_name); + } else { #ifdef _WIN32 - const char *lib_name = "ze_loader.dll"; + lib_name = "ze_loader.dll"; #else - const char *lib_name = "libze_loader.so.1"; + lib_name = "libze_loader.so.1"; #endif + LOG_DEBUG("Using default ze_loader library name: %s", lib_name); + } // The Level Zero shared library should be already loaded by the user // of the Level Zero provider. UMF just want to reuse it // and increase the reference count to the Level Zero shared library. @@ -203,6 +278,8 @@ static void init_ze_global_state(void) { utils_get_symbol_addr(lib_handle, "zeMemCloseIpcHandle", lib_name); *(void **)&g_ze_ops.zeContextMakeMemoryResident = utils_get_symbol_addr( lib_handle, "zeContextMakeMemoryResident", lib_name); + *(void **)&g_ze_ops.zeContextEvictMemory = + utils_get_symbol_addr(lib_handle, "zeContextEvictMemory", lib_name); *(void **)&g_ze_ops.zeDeviceGetProperties = utils_get_symbol_addr(lib_handle, "zeDeviceGetProperties", lib_name); *(void **)&g_ze_ops.zeMemFreeExt = @@ -215,7 +292,8 @@ static void init_ze_global_state(void) { !g_ze_ops.zeMemGetIpcHandle || !g_ze_ops.zeMemOpenIpcHandle || !g_ze_ops.zeMemCloseIpcHandle || !g_ze_ops.zeContextMakeMemoryResident || - !g_ze_ops.zeDeviceGetProperties || !g_ze_ops.zeMemGetAllocProperties) { + !g_ze_ops.zeContextEvictMemory || !g_ze_ops.zeDeviceGetProperties || + !g_ze_ops.zeMemGetAllocProperties) { // g_ze_ops.zeMemPutIpcHandle can be NULL because it was introduced // starting from Level Zero 1.6 LOG_FATAL("Required Level Zero symbols not found."); @@ -248,7 +326,10 @@ umf_result_t umfLevelZeroMemoryProviderParamsCreate( params->resident_device_handles = NULL; params->resident_device_count = 0; params->freePolicy = UMF_LEVEL_ZERO_MEMORY_PROVIDER_FREE_POLICY_DEFAULT; + params->use_import_export_for_IPC = 0; // disabled by default - use IPC params->device_ordinal = 0; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; @@ -318,6 +399,24 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( return UMF_RESULT_SUCCESS; } +umf_result_t umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name) { + if (!hParams) { + LOG_ERR("Level Zero memory provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (!name) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + 
hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} + umf_result_t umfLevelZeroMemoryProviderParamsSetResidentDevices( umf_level_zero_memory_provider_params_handle_t hParams, ze_device_handle_t *hDevices, uint32_t deviceCount) { @@ -325,12 +424,22 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetResidentDevices( LOG_ERR("Level Zero memory provider params handle is NULL"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - - if (deviceCount && !hDevices) { + if (deviceCount > 0 && hDevices == NULL) { LOG_ERR("Resident devices array is NULL, but deviceCount is not zero"); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } + for (uint32_t first_idx = 0; first_idx < deviceCount; first_idx++) { + for (uint32_t second_idx = 0; second_idx < first_idx; second_idx++) { + if (hDevices[first_idx] == hDevices[second_idx]) { + LOG_ERR("resident devices are not unique, idx: %u and " + "idx: %u both point to device: %p", + first_idx, second_idx, (void *)hDevices[first_idx]); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + } + } + hParams->resident_device_handles = hDevices; hParams->resident_device_count = deviceCount; @@ -371,16 +480,48 @@ static bool use_relaxed_allocation(ze_memory_provider_t *ze_provider, return size > ze_provider->device_properties.maxMemAllocSize; } -static ze_relaxed_allocation_limits_exp_desc_t relaxed_device_allocation_desc = - {.stype = ZE_STRUCTURE_TYPE_RELAXED_ALLOCATION_LIMITS_EXP_DESC, - .pNext = NULL, - .flags = ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_MAX_SIZE}; +static umf_result_t ze_memory_provider_free_helper(void *provider, void *ptr, + size_t bytes, + int update_stats) { + if (ptr == NULL) { + return UMF_RESULT_SUCCESS; + } + + ze_memory_provider_t *ze_provider = provider; + umf_result_t ret; + if (ze_provider->freePolicyFlags == 0) { + ret = ze2umf_result(g_ze_ops.zeMemFree(ze_provider->context, ptr)); + } else { + ze_memory_free_ext_desc_t desc = { + .stype = ZE_STRUCTURE_TYPE_MEMORY_FREE_EXT_DESC, + .pNext = NULL, + .freePolicy = ze_provider->freePolicyFlags}; + + ret = ze2umf_result( + g_ze_ops.zeMemFreeExt(ze_provider->context, &desc, ptr)); + } + + if (ret != UMF_RESULT_SUCCESS) { + return ret; + } + + if (update_stats) { + provider_ctl_stats_free(ze_provider, bytes); + } + + return UMF_RESULT_SUCCESS; +} + +static umf_result_t ze_memory_provider_free(void *provider, void *ptr, + size_t bytes) { + return ze_memory_provider_free_helper(provider, ptr, bytes, 1); +} static umf_result_t ze_memory_provider_alloc_helper(void *provider, size_t size, size_t alignment, int update_stats, void **resultPtr) { - ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider; + ze_memory_provider_t *ze_provider = provider; ze_result_t ze_result = ZE_RESULT_SUCCESS; switch (ze2umf_memory_type(ze_provider->memory_type)) { @@ -396,11 +537,29 @@ static umf_result_t ze_memory_provider_alloc_helper(void *provider, size_t size, case UMF_MEMORY_TYPE_DEVICE: { ze_device_mem_alloc_desc_t dev_desc = { .stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, - .pNext = use_relaxed_allocation(ze_provider, size) - ? 
&relaxed_device_allocation_desc - : NULL, + .pNext = NULL, .flags = 0, .ordinal = ze_provider->device_ordinal}; + void *lastNext = &dev_desc.pNext; + + ze_relaxed_allocation_limits_exp_desc_t + relaxed_device_allocation_desc_copy = + relaxed_device_allocation_desc; + if (use_relaxed_allocation(ze_provider, size)) { + // add relaxed allocation desc to the pNext chain + *(void **)lastNext = &relaxed_device_allocation_desc_copy; + lastNext = &relaxed_device_allocation_desc_copy.pNext; + } + + // check if the allocation should use import / export mechanism + ze_external_memory_export_desc_t memory_export_desc_copy = + memory_export_desc; + if (ze_provider->use_import_export_for_IPC == 1) { + // add external memory export desc to the pNext chain + *(void **)lastNext = &memory_export_desc_copy; + lastNext = &memory_export_desc_copy.pNext; + } + ze_result = g_ze_ops.zeMemAllocDevice(ze_provider->context, &dev_desc, size, alignment, ze_provider->device, resultPtr); @@ -431,17 +590,38 @@ static umf_result_t ze_memory_provider_alloc_helper(void *provider, size_t size, } if (ze_result != ZE_RESULT_SUCCESS) { + LOG_ERR( + "failed to allocate memory, type: %d, size: %lu, alignment: %lu, " + "result: %d", + ze_provider->memory_type, size, alignment, ze_result); return ze2umf_result(ze_result); } + utils_read_lock(&ze_provider->resident_device_rwlock); for (uint32_t i = 0; i < ze_provider->resident_device_count; i++) { ze_result = g_ze_ops.zeContextMakeMemoryResident( ze_provider->context, ze_provider->resident_device_handles[i], *resultPtr, size); if (ze_result != ZE_RESULT_SUCCESS) { + utils_read_unlock(&ze_provider->resident_device_rwlock); + LOG_ERR("making resident allocation %p of size: %lu on device %p " + "failed with 0x%x", + *resultPtr, size, + (void *)ze_provider->resident_device_handles[i], ze_result); + umf_result_t free_result = + ze_memory_provider_free(ze_provider, *resultPtr, size); + if (free_result != UMF_RESULT_SUCCESS) { + LOG_ERR("failed to free memory with: 0x%x after failed making " + "resident, free fail ignored", + free_result); + } return ze2umf_result(ze_result); } + LOG_DEBUG("allocation %p of size: %lu made resident on device %p", + *resultPtr, size, + (void *)ze_provider->resident_device_handles[i]); } + utils_read_unlock(&ze_provider->resident_device_rwlock); if (update_stats) { provider_ctl_stats_alloc(ze_provider, size); @@ -456,49 +636,10 @@ static umf_result_t ze_memory_provider_alloc(void *provider, size_t size, resultPtr); } -static umf_result_t ze_memory_provider_free_helper(void *provider, void *ptr, - size_t bytes, - int update_stats) { - if (ptr == NULL) { - return UMF_RESULT_SUCCESS; - } - - ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider; - umf_result_t ret; - if (ze_provider->freePolicyFlags == 0) { - ret = ze2umf_result(g_ze_ops.zeMemFree(ze_provider->context, ptr)); - } else { - ze_memory_free_ext_desc_t desc = { - .stype = ZE_STRUCTURE_TYPE_MEMORY_FREE_EXT_DESC, - .pNext = NULL, - .freePolicy = ze_provider->freePolicyFlags}; - - ret = ze2umf_result( - g_ze_ops.zeMemFreeExt(ze_provider->context, &desc, ptr)); - } - - if (ret != UMF_RESULT_SUCCESS) { - return ret; - } - - if (update_stats) { - provider_ctl_stats_free(ze_provider, bytes); - } - - return UMF_RESULT_SUCCESS; -} - -static umf_result_t ze_memory_provider_free(void *provider, void *ptr, - size_t bytes) { - return ze_memory_provider_free_helper(provider, ptr, bytes, 1); -} - static umf_result_t query_min_page_size(ze_memory_provider_t *ze_provider, size_t *min_page_size) { 
assert(min_page_size); - LOG_DEBUG("Querying minimum page size"); - void *ptr; umf_result_t result = ze_memory_provider_alloc_helper(ze_provider, 1, 0, 0, &ptr); @@ -512,6 +653,7 @@ static umf_result_t query_min_page_size(ze_memory_provider_t *ze_provider, ze_provider->context, ptr, &properties, NULL); *min_page_size = properties.pageSize; + LOG_DEBUG("Querying minimum page size, got: %lu", properties.pageSize); ze_memory_provider_free_helper(ze_provider, ptr, 1, 0); @@ -519,9 +661,11 @@ static umf_result_t query_min_page_size(ze_memory_provider_t *ze_provider, } static umf_result_t ze_memory_provider_finalize(void *provider) { - ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider; - umf_ba_global_free(ze_provider->resident_device_handles); - + ze_memory_provider_t *ze_provider = provider; + if (ze_provider->resident_device_handles != NULL) { + umf_ba_global_free(ze_provider->resident_device_handles); + } + utils_rwlock_destroy_not_free(&ze_provider->resident_device_rwlock); umf_ba_global_free(provider); return UMF_RESULT_SUCCESS; } @@ -546,10 +690,11 @@ static umf_result_t ze_memory_provider_initialize(const void *params, return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - if ((bool)ze_params->resident_device_count && - (ze_params->resident_device_handles == NULL)) { - LOG_ERR("Resident devices handles array is NULL, but device_count is " - "not zero"); + if (ze_params->resident_device_count > 0 && + ze_params->resident_device_handles == NULL) { + LOG_ERR("Device handler should be non-NULL if device_count: %d is " + "greater than 0", + ze_params->resident_device_count); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } @@ -565,12 +710,17 @@ static umf_result_t ze_memory_provider_initialize(const void *params, LOG_ERR("Cannot allocate memory for Level Zero Memory Provider"); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + memset(ze_provider, 0, sizeof(*ze_provider)); + snprintf(ze_provider->name, sizeof(ze_provider->name), "%s", + ze_params->name); ze_provider->context = ze_params->level_zero_context_handle; ze_provider->device = ze_params->level_zero_device_handle; ze_provider->memory_type = umf2ze_memory_type(ze_params->memory_type); ze_provider->freePolicyFlags = umfFreePolicyToZePolicy(ze_params->freePolicy); + ze_provider->use_import_export_for_IPC = + ze_params->use_import_export_for_IPC; ze_provider->min_page_size = 0; ze_provider->device_ordinal = ze_params->device_ordinal; @@ -589,27 +739,36 @@ static umf_result_t ze_memory_provider_initialize(const void *params, } } - if (ze_params->resident_device_count) { + if (utils_rwlock_init(&ze_provider->resident_device_rwlock) != 0) { + LOG_ERR("Cannot initialize resident device rwlock"); + umf_ba_global_free(ze_provider); + return UMF_RESULT_ERROR_OUT_OF_RESOURCES; + } + + ze_provider->resident_device_count = ze_params->resident_device_count; + ze_provider->resident_device_capacity = ze_params->resident_device_count; + + if (ze_params->resident_device_count > 0) { ze_provider->resident_device_handles = umf_ba_global_alloc( sizeof(ze_device_handle_t) * ze_params->resident_device_count); - if (!ze_provider->resident_device_handles) { + if (ze_provider->resident_device_handles == NULL) { LOG_ERR("Cannot allocate memory for resident devices"); + utils_rwlock_destroy_not_free(&ze_provider->resident_device_rwlock); umf_ba_global_free(ze_provider); return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - ze_provider->resident_device_count = ze_params->resident_device_count; + memcpy(ze_provider->resident_device_handles, + 
ze_params->resident_device_handles, + sizeof(ze_device_handle_t) * ze_params->resident_device_count); - for (uint32_t i = 0; i < ze_provider->resident_device_count; i++) { - ze_provider->resident_device_handles[i] = - ze_params->resident_device_handles[i]; - } + LOG_INFO("L0 memory provider: %p have %d resident device(s)", + (void *)ze_provider, ze_params->resident_device_count); } else { - ze_provider->resident_device_handles = NULL; - ze_provider->resident_device_count = 0; + LOG_INFO("L0 memory provider has no resident devices"); } - umf_result_t result = + const umf_result_t result = query_min_page_size(ze_provider, &ze_provider->min_page_size); if (result != UMF_RESULT_SUCCESS) { ze_memory_provider_finalize(ze_provider); @@ -638,7 +797,7 @@ ze_memory_provider_get_last_native_error(void *provider, const char **ppMessage, static umf_result_t ze_memory_provider_get_min_page_size(void *provider, const void *ptr, size_t *pageSize) { - ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider; + ze_memory_provider_t *ze_provider = provider; if (!ptr) { *pageSize = ze_provider->min_page_size; @@ -687,8 +846,15 @@ ze_memory_provider_get_recommended_page_size(void *provider, size_t size, static umf_result_t ze_memory_provider_get_name(void *provider, const char **name) { - (void)provider; - *name = "LEVEL_ZERO"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + ze_memory_provider_t *ze_provider = provider; + *name = ze_provider->name; return UMF_RESULT_SUCCESS; } @@ -720,6 +886,7 @@ static umf_result_t ze_memory_provider_allocation_split(void *provider, typedef struct ze_ipc_data_t { int pid; + size_t size; ze_ipc_mem_handle_t ze_handle; } ze_ipc_data_t; @@ -735,20 +902,46 @@ static umf_result_t ze_memory_provider_get_ipc_handle(void *provider, const void *ptr, size_t size, void *providerIpcData) { - (void)size; - ze_result_t ze_result; ze_ipc_data_t *ze_ipc_data = (ze_ipc_data_t *)providerIpcData; struct ze_memory_provider_t *ze_provider = (struct ze_memory_provider_t *)provider; - ze_result = g_ze_ops.zeMemGetIpcHandle(ze_provider->context, ptr, - &ze_ipc_data->ze_handle); - if (ze_result != ZE_RESULT_SUCCESS) { - LOG_ERR("zeMemGetIpcHandle() failed."); - return ze2umf_result(ze_result); + if (ze_provider->use_import_export_for_IPC == 0) { + // default - IPC API + ze_result = g_ze_ops.zeMemGetIpcHandle(ze_provider->context, ptr, + &ze_ipc_data->ze_handle); + if (ze_result != ZE_RESULT_SUCCESS) { + LOG_ERR("zeMemGetIpcHandle() failed."); + return ze2umf_result(ze_result); + } + } else { + // import / export API (NOTE this requires additional flags enabled + // during the memory allocation) + ze_external_memory_export_fd_t fd_desc = { + .stype = ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_FD, + .pNext = NULL, + .flags = ZE_EXTERNAL_MEMORY_TYPE_FLAG_OPAQUE_WIN32, + .fd = 0}; + + ze_memory_allocation_properties_t mem_alloc_props = { + .stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES, + .pNext = &fd_desc, + .type = 0, + .id = 0, + .pageSize = 0}; + + ze_result = g_ze_ops.zeMemGetAllocProperties(ze_provider->context, ptr, + &mem_alloc_props, NULL); + if (ze_result != ZE_RESULT_SUCCESS) { + LOG_ERR("zeMemGetAllocProperties() failed."); + return ze2umf_result(ze_result); + } + + memcpy(&ze_ipc_data->ze_handle, &fd_desc.fd, sizeof(fd_desc.fd)); } + ze_ipc_data->size = size; ze_ipc_data->pid = utils_getpid(); return UMF_RESULT_SUCCESS; @@ -799,14 +992,41 @@ static umf_result_t 
ze_memory_provider_open_ipc_handle(void *provider, memcpy(&ze_ipc_handle, &fd_local, sizeof(fd_local)); } - ze_result = g_ze_ops.zeMemOpenIpcHandle( - ze_provider->context, ze_provider->device, ze_ipc_handle, 0, ptr); - if (fd_local != -1) { - (void)utils_close_fd(fd_local); - } - if (ze_result != ZE_RESULT_SUCCESS) { - LOG_ERR("zeMemOpenIpcHandle() failed."); - return ze2umf_result(ze_result); + if (ze_provider->use_import_export_for_IPC == 0) { + // default - IPC API + ze_result = g_ze_ops.zeMemOpenIpcHandle( + ze_provider->context, ze_provider->device, ze_ipc_handle, 0, ptr); + if (fd_local != -1) { + (void)utils_close_fd(fd_local); + } + if (ze_result != ZE_RESULT_SUCCESS) { + LOG_ERR("zeMemOpenIpcHandle() failed."); + return ze2umf_result(ze_result); + } + } else { + // import / export API + ze_external_memory_import_fd_t import_fd = { + .stype = ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMPORT_FD, + .pNext = NULL, + .flags = ZE_EXTERNAL_MEMORY_TYPE_FLAG_DMA_BUF, + .fd = fd_local}; + + ze_device_mem_alloc_desc_t alloc_desc = { + .stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, + .pNext = &import_fd, + .flags = 0, + .ordinal = 0}; + ze_result = g_ze_ops.zeMemAllocDevice(ze_provider->context, &alloc_desc, + ze_ipc_data->size, 0, + ze_provider->device, ptr); + if (fd_local != -1) { + (void)utils_close_fd(fd_local); + } + + if (ze_result != ZE_RESULT_SUCCESS) { + LOG_ERR("zeMemAllocDevice() failed."); + return ze2umf_result(ze_result); + } } return UMF_RESULT_SUCCESS; @@ -838,6 +1058,223 @@ static umf_result_t ze_ctl(void *hProvider, query_type, arg, size, args); } +static umf_result_t ze_memory_provider_get_allocation_properties( + void *provider, const void *ptr, + umf_memory_property_id_t memory_property_id, void *value) { + + // unused + (void)ptr; + + struct ze_memory_provider_t *ze_provider = + (struct ze_memory_provider_t *)provider; + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + *(umf_usm_memory_type_t *)value = + ze2umf_memory_type(ze_provider->memory_type); + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_CONTEXT: + *(ze_context_handle_t *)value = ze_provider->context; + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_DEVICE: + *(ze_device_handle_t *)value = ze_provider->device; + return UMF_RESULT_SUCCESS; + + default: + break; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + +static umf_result_t ze_memory_provider_get_allocation_properties_size( + void *provider, umf_memory_property_id_t memory_property_id, size_t *size) { + + // unused + (void)provider; + + switch (memory_property_id) { + case UMF_MEMORY_PROPERTY_POINTER_TYPE: + *size = sizeof(umf_usm_memory_type_t); + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_CONTEXT: + *size = sizeof(ze_context_handle_t); + return UMF_RESULT_SUCCESS; + + case UMF_MEMORY_PROPERTY_DEVICE: + *size = sizeof(ze_device_handle_t); + return UMF_RESULT_SUCCESS; + + default: + break; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; +} + +struct ze_memory_provider_resident_device_change_data { + bool is_adding; + ze_device_handle_t peer_device; + ze_memory_provider_t *source_memory_provider; + uint32_t success_changes; + uint32_t failed_changes; +}; + +static int ze_memory_provider_resident_device_change_helper(uintptr_t key, + void *value, + void *privdata) { + struct ze_memory_provider_resident_device_change_data *change_data = + privdata; + tracker_alloc_info_t *info = value; + if (info->props.provider->provider_priv != + (void *)change_data->source_memory_provider) { + 
LOG_DEBUG("ze_memory_provider_resident_device_change found not our " + "pointer %p", + (void *)key); + return 0; + } + + assert(info->props.base == (void *)key); + + ze_result_t result; + if (change_data->is_adding) { + result = g_ze_ops.zeContextMakeMemoryResident( + change_data->source_memory_provider->context, + change_data->peer_device, info->props.base, info->props.base_size); + } else { + result = g_ze_ops.zeContextEvictMemory( + change_data->source_memory_provider->context, + change_data->peer_device, info->props.base, info->props.base_size); + } + + if (result != ZE_RESULT_SUCCESS) { + LOG_ERR( + "ze_memory_provider_resident_device_change found our pointer " + "%p but failed to make it resident on device: %p due to err: %d", + (void *)key, (void *)change_data->peer_device, result); + ++change_data->failed_changes; + store_last_native_error(result); + return 1; + } + + LOG_DEBUG("ze_memory_provider_resident_device_change found our pointer %p " + "and made it resident on device: %p", + (void *)key, (void *)change_data->peer_device); + ++change_data->success_changes; + return 0; +} + +umf_result_t umfLevelZeroMemoryProviderResidentDeviceChange( + umf_memory_provider_handle_t provider, ze_device_handle_t device, + bool is_adding) { + ze_memory_provider_t *ze_provider = umfMemoryProviderGetPriv(provider); + + LOG_INFO("%s resident device %p, src_provider: %p, existing peers " + "count: %d", + (is_adding ? "adding" : "removing"), (void *)device, + (void *)provider, ze_provider->resident_device_count); + + uint32_t existing_peer_index = 0; + utils_write_lock(&ze_provider->resident_device_rwlock); + for (; existing_peer_index < ze_provider->resident_device_count; + ++existing_peer_index) { + if (ze_provider->resident_device_handles[existing_peer_index] == + device) { + break; + } + } + + if (ze_provider->resident_device_count == 0 || + existing_peer_index == ze_provider->resident_device_count) { + // not found + if (!is_adding) { + utils_write_unlock(&ze_provider->resident_device_rwlock); + LOG_ERR("trying to remove resident device %p but the device " + "is currently not a peer of provider: %p", + (void *)device, (void *)provider); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + // adding case + if (ze_provider->resident_device_count == + ze_provider->resident_device_capacity) { + const uint32_t new_capacity = + ze_provider->resident_device_capacity + 1; + ze_device_handle_t *new_handles = + umf_ba_global_alloc(sizeof(ze_device_handle_t) * new_capacity); + if (new_handles == NULL) { + utils_write_unlock(&ze_provider->resident_device_rwlock); + LOG_ERR("enlarging resident devices array from %u to %u failed " + "due to no memory", + ze_provider->resident_device_capacity, new_capacity); + return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; + } + LOG_DEBUG("enlarging resident devices array from %u to %u", + ze_provider->resident_device_capacity, new_capacity); + if (ze_provider->resident_device_count > 0) { + ASSERT(ze_provider->resident_device_handles != NULL); + memcpy(new_handles, ze_provider->resident_device_handles, + sizeof(ze_device_handle_t) * + ze_provider->resident_device_count); + } + umf_ba_global_free(ze_provider->resident_device_handles); + ze_provider->resident_device_handles = new_handles; + ze_provider->resident_device_capacity = new_capacity; + } + ze_provider->resident_device_handles[existing_peer_index] = device; + ++ze_provider->resident_device_count; + + } else { + // found + if (is_adding) { + utils_write_unlock(&ze_provider->resident_device_rwlock); + LOG_ERR("trying to add 
resident device: %p but the device is " + "already a peer of provider: %p", + (void *)device, (void *)provider); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + // removing case, put last in place of removed one + --ze_provider->resident_device_count; + ze_provider->resident_device_handles[existing_peer_index] = + ze_provider + ->resident_device_handles[ze_provider->resident_device_count]; + } + utils_write_unlock(&ze_provider->resident_device_rwlock); + + struct ze_memory_provider_resident_device_change_data privData = { + .is_adding = is_adding, + .peer_device = device, + .source_memory_provider = ze_provider, + .success_changes = 0, + .failed_changes = 0, + }; + + // This is "hacky" and it will not work if someone uses a pool without the tracker + // or just uses a provider without a pool. It can be solved by keeping track of + // allocations in the provider, as the os_provider does. + umf_result_t result = umfMemoryTrackerIterateAll( + &ze_memory_provider_resident_device_change_helper, &privData); + if (result != UMF_RESULT_SUCCESS) { + LOG_ERR("umfMemoryTrackerIterateAll failed during resident device " + "change with result: %d, numFailed: %d, numSuccess: %d", + result, privData.failed_changes, privData.success_changes); + return result; + } + + if (privData.failed_changes > 0) { + LOG_ERR("umfMemoryTrackerIterateAll failed to apply some changes, " + "numFailed: %d, numSuccess: %d", + privData.failed_changes, privData.success_changes); + return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; + } + + LOG_INFO("ze_memory_provider_resident_device_change done, numSuccess: %d", + privData.success_changes); + return UMF_RESULT_SUCCESS; +} + static umf_memory_provider_ops_t UMF_LEVEL_ZERO_MEMORY_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = ze_memory_provider_initialize, @@ -858,6 +1295,10 @@ static umf_memory_provider_ops_t UMF_LEVEL_ZERO_MEMORY_PROVIDER_OPS = { .ext_open_ipc_handle = ze_memory_provider_open_ipc_handle, .ext_close_ipc_handle = ze_memory_provider_close_ipc_handle, .ext_ctl = ze_ctl, + .ext_get_allocation_properties = + ze_memory_provider_get_allocation_properties, + .ext_get_allocation_properties_size = + ze_memory_provider_get_allocation_properties_size, }; const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void) { @@ -939,6 +1380,13 @@ umf_result_t umfLevelZeroMemoryProviderParamsSetDeviceOrdinal( return UMF_RESULT_ERROR_NOT_SUPPORTED; } +umf_result_t umfLevelZeroMemoryProviderParamsSetName( + umf_level_zero_memory_provider_params_handle_t hParams, const char *name) { + (void)hParams; + (void)name; + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + const umf_memory_provider_ops_t *umfLevelZeroMemoryProviderOps(void) { // not supported LOG_ERR("L0 memory provider is disabled! (UMF_BUILD_LEVEL_ZERO_PROVIDER is " "OFF)"); return NULL; } +umf_result_t umfLevelZeroMemoryProviderResidentDeviceChange( + umf_memory_provider_handle_t provider, ze_device_handle_t device, + bool is_adding) { + (void)provider; + (void)device; + (void)is_adding; + LOG_ERR("L0 memory provider is disabled! 
(UMF_BUILD_LEVEL_ZERO_PROVIDER is " + "OFF)"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; +} + #endif // !UMF_BUILD_LEVEL_ZERO_PROVIDER diff --git a/src/provider/provider_os_memory.c b/src/provider/provider_os_memory.c index e984d8ee83..edaf315dc4 100644 --- a/src/provider/provider_os_memory.c +++ b/src/provider/provider_os_memory.c @@ -8,97 +8,24 @@ #include #include #include - #include #include #include #include + #include #include #include #include #include -#include "ctl/ctl_internal.h" -#include "utils_assert.h" -// OS Memory Provider requires HWLOC -#if defined(UMF_NO_HWLOC) - -const umf_memory_provider_ops_t *umfOsMemoryProviderOps(void) { return NULL; } - -umf_result_t umfOsMemoryProviderParamsCreate( - umf_os_memory_provider_params_handle_t *hParams) { - (void)hParams; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsDestroy( - umf_os_memory_provider_params_handle_t hParams) { - (void)hParams; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetProtection( - umf_os_memory_provider_params_handle_t hParams, unsigned protection) { - (void)hParams; - (void)protection; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetVisibility( - umf_os_memory_provider_params_handle_t hParams, - umf_memory_visibility_t visibility) { - (void)hParams; - (void)visibility; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetShmName( - umf_os_memory_provider_params_handle_t hParams, const char *shm_name) { - (void)hParams; - (void)shm_name; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetNumaList( - umf_os_memory_provider_params_handle_t hParams, unsigned *numa_list, - unsigned numa_list_len) { - (void)hParams; - (void)numa_list; - (void)numa_list_len; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetNumaMode( - umf_os_memory_provider_params_handle_t hParams, umf_numa_mode_t numa_mode) { - (void)hParams; - (void)numa_mode; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetPartSize( - umf_os_memory_provider_params_handle_t hParams, size_t part_size) { - (void)hParams; - (void)part_size; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -umf_result_t umfOsMemoryProviderParamsSetPartitions( - umf_os_memory_provider_params_handle_t hParams, - umf_numa_split_partition_t *partitions, unsigned partitions_len) { - (void)hParams; - (void)partitions; - (void)partitions_len; - return UMF_RESULT_ERROR_NOT_SUPPORTED; -} - -#else // !defined(UMF_NO_HWLOC) - #include "base_alloc_global.h" #include "critnib.h" +#include "ctl/ctl_internal.h" #include "libumf.h" #include "provider_os_memory_internal.h" #include "topology.h" +#include "utils_assert.h" #include "utils_common.h" #include "utils_concurrency.h" #include "utils_log.h" @@ -110,6 +37,8 @@ umf_result_t umfOsMemoryProviderParamsSetPartitions( #define TLS_MSG_BUF_LEN 1024 +static const char *DEFAULT_NAME = "OS"; + typedef struct umf_os_memory_provider_params_t { // Combination of 'umf_mem_protection_flags_t' flags unsigned protection; @@ -134,6 +63,7 @@ typedef struct umf_os_memory_provider_params_t { umf_numa_split_partition_t *partitions; /// len of the partitions array unsigned partitions_len; + char name[64]; } umf_os_memory_provider_params_t; typedef struct os_last_native_error_t { @@ -629,6 +559,8 @@ static umf_result_t os_initialize(const void *params, void **provider) { } memset(os_provider, 0, 
sizeof(*os_provider)); + snprintf(os_provider->name, sizeof(os_provider->name), "%s", + in_params->name); os_provider->topo = umfGetTopologyReduced(); if (!os_provider->topo) { @@ -636,6 +568,7 @@ static umf_result_t os_initialize(const void *params, void **provider) { 0); LOG_ERR("HWLOC topology discovery failed"); ret = UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; + goto err_free_os_provider; } os_provider->fd_offset_map = critnib_new(NULL, NULL); @@ -678,7 +611,6 @@ static umf_result_t os_initialize(const void *params, void **provider) { } *provider = os_provider; - return UMF_RESULT_SUCCESS; err_destroy_bitmaps: @@ -1221,8 +1153,15 @@ static umf_result_t os_purge_force(void *provider, void *ptr, size_t size) { } static umf_result_t os_get_name(void *provider, const char **name) { - (void)provider; // unused - *name = "OS"; + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + if (provider == NULL) { + *name = DEFAULT_NAME; + return UMF_RESULT_SUCCESS; + } + os_memory_provider_t *os_provider = (os_memory_provider_t *)provider; + *name = os_provider->name; return UMF_RESULT_SUCCESS; } @@ -1403,7 +1342,8 @@ static umf_result_t os_open_ipc_handle(void *provider, void *providerIpcData, os_ipc_data->visibility, fd, os_ipc_data->fd_offset); if (*ptr == NULL) { os_store_last_native_error(UMF_OS_RESULT_ERROR_ALLOC_FAILED, errno); - LOG_PERR("memory mapping failed"); + LOG_PERR("memory mapping failed: %zu bytes at fd=%d, offset=%zu", + os_ipc_data->size, fd, os_ipc_data->fd_offset); ret = UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC; } @@ -1492,6 +1432,8 @@ umf_result_t umfOsMemoryProviderParamsCreate( params->part_size = 0; params->partitions = NULL; params->partitions_len = 0; + strncpy(params->name, DEFAULT_NAME, sizeof(params->name) - 1); + params->name[sizeof(params->name) - 1] = '\0'; *hParams = params; @@ -1648,4 +1590,21 @@ umf_result_t umfOsMemoryProviderParamsSetPartitions( return UMF_RESULT_SUCCESS; } -#endif // !defined(UMF_NO_HWLOC) +umf_result_t +umfOsMemoryProviderParamsSetName(umf_os_memory_provider_params_handle_t hParams, + const char *name) { + if (hParams == NULL) { + LOG_ERR("OS memory provider params handle is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (name == NULL) { + LOG_ERR("name is NULL"); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(hParams->name, name, sizeof(hParams->name) - 1); + hParams->name[sizeof(hParams->name) - 1] = '\0'; + + return UMF_RESULT_SUCCESS; +} diff --git a/src/provider/provider_os_memory_internal.h b/src/provider/provider_os_memory_internal.h index 4d2e8e2176..3648d4a88f 100644 --- a/src/provider/provider_os_memory_internal.h +++ b/src/provider/provider_os_memory_internal.h @@ -70,6 +70,8 @@ typedef struct os_memory_provider_t { hwloc_topology_t topo; + char name[64]; + ctl_stats_t stats; } os_memory_provider_t; diff --git a/src/provider/provider_tracking.c b/src/provider/provider_tracking.c index 386eef0ba3..573e9bb357 100644 --- a/src/provider/provider_tracking.c +++ b/src/provider/provider_tracking.c @@ -15,6 +15,7 @@ #include #include +#include #include #include "base_alloc_global.h" @@ -22,6 +23,8 @@ #include "ipc_cache.h" #include "ipc_internal.h" #include "memory_pool_internal.h" +#include "memory_properties_internal.h" +#include "memory_provider_internal.h" #include "provider_tracking.h" #include "utils_common.h" #include "utils_concurrency.h" @@ -32,6 +35,8 @@ uint64_t IPC_HANDLE_ID = 0; +uint64_t unique_alloc_id = 0; // requires atomic access + struct umf_memory_tracker_t { umf_ba_pool_t 
*alloc_info_allocator; // Multilevel maps are needed to support the case @@ -43,19 +48,8 @@ struct umf_memory_tracker_t { critnib *ipc_segments_map; }; -typedef struct tracker_alloc_info_t { - umf_memory_pool_handle_t pool; - size_t size; - // number of overlapping memory regions - // in the next level of map - // falling within the current range - size_t n_children; -#if !defined(NDEBUG) && defined(UMF_DEVELOPER_MODE) - uint64_t is_freed; -#endif -} tracker_alloc_info_t; - typedef struct tracker_ipc_info_t { + umf_memory_properties_t props; size_t size; umf_memory_provider_handle_t provider; ipc_opened_cache_value_t *ipc_cache_value; @@ -113,7 +107,8 @@ static tracker_alloc_info_t *get_most_nested_alloc_segment( continue; } - utils_atomic_load_acquire_u64((uint64_t *)&rvalue->size, &rsize); + utils_atomic_load_acquire_u64((uint64_t *)&rvalue->props.base_size, + &rsize); utils_atomic_load_acquire_size_t(&rvalue->n_children, &n_children); if (found && ((uintptr_t)ptr < rkey + rsize) && n_children) { if (level == MAX_LEVELS_OF_ALLOC_SEGMENT_MAP - 1) { @@ -192,8 +187,26 @@ umfMemoryTrackerAddAtLevel(umf_memory_tracker_handle_t hTracker, int level, return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } - value->pool = pool; - value->size = size; + // get provider + umf_memory_provider_handle_t provider = NULL; + umfPoolGetMemoryProvider(pool, &provider); + + // get memory type + umf_usm_memory_type_t memory_type = UMF_MEMORY_TYPE_UNKNOWN; + if (provider && provider->ops.ext_get_allocation_properties) { + provider->ops.ext_get_allocation_properties( + provider->provider_priv, ptr, UMF_MEMORY_PROPERTY_POINTER_TYPE, + &memory_type); + } + + memset(&value->props, 0, sizeof(umf_memory_properties_t)); + value->props.id = utils_atomic_increment_u64(&unique_alloc_id); + value->props.base = (void *)ptr; + value->props.base_size = size; + value->props.pool = pool; + value->props.provider = provider; + value->props.memory_type = memory_type; + value->n_children = 0; #if !defined(NDEBUG) && defined(UMF_DEVELOPER_MODE) value->is_freed = 0; @@ -214,8 +227,8 @@ umfMemoryTrackerAddAtLevel(umf_memory_tracker_handle_t hTracker, int level, "child #%zu added to memory region: tracker=%p, level=%i, " "pool=%p, ptr=%p, size=%zu", n_children, (void *)hTracker, level - 1, - (void *)parent_value->pool, (void *)parent_key, - parent_value->size); + (void *)parent_value->props.pool, (void *)parent_key, + parent_value->props.base_size); assert(ref_parent_value); critnib_release(hTracker->alloc_segments_map[level - 1], ref_parent_value); @@ -286,7 +299,8 @@ static umf_result_t umfMemoryTrackerAdd(umf_memory_tracker_handle_t hTracker, assert(is_freed != 0xDEADBEEF); #endif - utils_atomic_load_acquire_u64((uint64_t *)&rvalue->size, &rsize); + utils_atomic_load_acquire_u64((uint64_t *)&rvalue->props.base_size, + &rsize); if ((uintptr_t)ptr < rkey + rsize) { if (level == MAX_LEVELS_OF_ALLOC_SEGMENT_MAP - 1) { @@ -300,8 +314,8 @@ static umf_result_t umfMemoryTrackerAdd(umf_memory_tracker_handle_t hTracker, "cannot insert to the tracker value (pool=%p, ptr=%p, " "size=%zu) " "that exceeds the parent value (pool=%p, ptr=%p, size=%zu)", - (void *)pool, ptr, size, (void *)rvalue->pool, (void *)rkey, - (size_t)rsize); + (void *)pool, ptr, size, (void *)rvalue->props.pool, + (void *)rkey, (size_t)rsize); return UMF_RESULT_ERROR_INVALID_ARGUMENT; } parent_key = rkey; @@ -363,7 +377,8 @@ static umf_result_t umfMemoryTrackerRemove(umf_memory_tracker_handle_t hTracker, LOG_DEBUG("memory region removed: tracker=%p, level=%i, pool=%p, ptr=%p, " 
"size=%zu", - (void *)hTracker, level, (void *)value->pool, ptr, value->size); + (void *)hTracker, level, (void *)value->props.pool, ptr, + value->props.base_size); // release the reference to the value got from critnib_remove() assert(ref_value); @@ -375,8 +390,9 @@ static umf_result_t umfMemoryTrackerRemove(umf_memory_tracker_handle_t hTracker, LOG_DEBUG( "child #%zu removed from memory region: tracker=%p, level=%i, " "pool=%p, ptr=%p, size=%zu", - n_children, (void *)hTracker, level - 1, (void *)parent_value->pool, - (void *)parent_key, parent_value->size); + n_children, (void *)hTracker, level - 1, + (void *)parent_value->props.pool, (void *)parent_key, + parent_value->props.base_size); assert(ref_parent_value); assert(level >= 1); @@ -405,10 +421,26 @@ umfMemoryTrackerAddIpcSegment(umf_memory_tracker_handle_t hTracker, return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY; } + // get memory type + umf_usm_memory_type_t memory_type = UMF_MEMORY_TYPE_UNKNOWN; + if (provider && provider->ops.ext_get_allocation_properties) { + provider->ops.ext_get_allocation_properties( + provider->provider_priv, ptr, UMF_MEMORY_PROPERTY_POINTER_TYPE, + &memory_type); + } + value->size = size; value->provider = provider; value->ipc_cache_value = cache_entry; + memset(&value->props, 0, sizeof(umf_memory_properties_t)); + value->props.id = utils_atomic_increment_u64(&unique_alloc_id); + value->props.base = (void *)ptr; + value->props.base_size = size; + value->props.pool = NULL; // unknown + value->props.provider = provider; + value->props.memory_type = memory_type; + int ret = critnib_insert(hTracker->ipc_segments_map, (uintptr_t)ptr, value, 0); if (ret == 0) { @@ -458,37 +490,26 @@ umfMemoryTrackerRemoveIpcSegment(umf_memory_tracker_handle_t hTracker, return UMF_RESULT_SUCCESS; } -umf_memory_pool_handle_t umfMemoryTrackerGetPool(const void *ptr) { - umf_alloc_info_t allocInfo = {NULL, 0, NULL}; - umf_result_t ret = umfMemoryTrackerGetAllocInfo(ptr, &allocInfo); - if (ret != UMF_RESULT_SUCCESS) { - return NULL; - } - - return allocInfo.pool; -} - umf_result_t umfMemoryTrackerGetAllocInfo(const void *ptr, - umf_alloc_info_t *pAllocInfo) { - assert(pAllocInfo); + tracker_alloc_info_t **info) { + assert(info); - if (ptr == NULL) { + if (UNLIKELY(ptr == NULL)) { return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - if (TRACKER == NULL) { + if (UNLIKELY(TRACKER == NULL)) { LOG_ERR("tracker does not exist"); return UMF_RESULT_ERROR_NOT_SUPPORTED; } - if (TRACKER->alloc_segments_map[0] == NULL) { + if (UNLIKELY(TRACKER->alloc_segments_map[0] == NULL)) { LOG_ERR("tracker's alloc_segments_map does not exist"); return UMF_RESULT_ERROR_NOT_SUPPORTED; } tracker_alloc_info_t *top_most_value = NULL; tracker_alloc_info_t *rvalue = NULL; - uintptr_t top_most_key = 0; uintptr_t rkey = 0; uint64_t rsize = 0; size_t n_children = 0; @@ -514,7 +535,6 @@ umf_result_t umfMemoryTrackerGetAllocInfo(const void *ptr, critnib_release(TRACKER->alloc_segments_map[level], ref_value); } top_most_value = NULL; - top_most_key = 0; rkey = 0; rsize = 0; level = 0; @@ -525,10 +545,10 @@ umf_result_t umfMemoryTrackerGetAllocInfo(const void *ptr, continue; } - utils_atomic_load_acquire_u64((uint64_t *)&rvalue->size, &rsize); + utils_atomic_load_acquire_u64((uint64_t *)&rvalue->props.base_size, + &rsize); utils_atomic_load_acquire_size_t(&rvalue->n_children, &n_children); if (found && (uintptr_t)ptr < rkey + rsize) { - top_most_key = rkey; top_most_value = rvalue; if (ref_top_most_value) { assert(level >= 1); @@ -555,9 +575,7 @@ umf_result_t 
umfMemoryTrackerGetAllocInfo(const void *ptr, return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - pAllocInfo->base = (void *)top_most_key; - pAllocInfo->baseSize = top_most_value->size; - pAllocInfo->pool = top_most_value->pool; + *info = top_most_value; assert(ref_top_most_value); critnib_release(TRACKER->alloc_segments_map[ref_level], ref_top_most_value); @@ -603,6 +621,8 @@ umf_result_t umfMemoryTrackerGetIpcInfo(const void *ptr, pIpcInfo->baseSize = rvalue->size; pIpcInfo->provider = rvalue->provider; + pIpcInfo->props = &rvalue->props; + if (ref_value) { critnib_release(TRACKER->ipc_segments_map, ref_value); } @@ -692,9 +712,9 @@ static umf_result_t trackingAllocationSplit(void *hProvider, void *ptr, ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto err; } - if (value->size != totalSize) { + if (value->props.base_size != totalSize) { LOG_ERR("tracked size=%zu does not match requested size to split: %zu", - value->size, totalSize); + value->props.base_size, totalSize); ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto err; } @@ -732,7 +752,8 @@ static umf_result_t trackingAllocationSplit(void *hProvider, void *ptr, } // update the size of the first part - utils_atomic_store_release_u64((uint64_t *)&value->size, firstSize); + utils_atomic_store_release_u64((uint64_t *)&value->props.base_size, + firstSize); critnib_release(provider->hTracker->alloc_segments_map[level], ref_value); utils_mutex_unlock(&provider->hTracker->splitMergeMutex); @@ -805,12 +826,12 @@ static umf_result_t trackingAllocationMerge(void *hProvider, void *lowPtr, ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto err_fatal; } - if (lowValue->pool != highValue->pool) { + if (lowValue->props.pool != highValue->props.pool) { LOG_FATAL("pool mismatch"); ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto err_fatal; } - if (lowValue->size + highValue->size != totalSize) { + if (lowValue->props.base_size + highValue->props.base_size != totalSize) { LOG_FATAL("lowValue->size + highValue->size != totalSize"); ret = UMF_RESULT_ERROR_INVALID_ARGUMENT; goto err_fatal; @@ -824,7 +845,8 @@ static umf_result_t trackingAllocationMerge(void *hProvider, void *lowPtr, } // we only need to update the size of the first part - utils_atomic_store_release_u64((uint64_t *)&lowValue->size, totalSize); + utils_atomic_store_release_u64((uint64_t *)&lowValue->props.base_size, + totalSize); size_t low_children = lowValue->n_children; size_t high_children = highValue->n_children; @@ -950,12 +972,13 @@ static void check_if_tracker_is_empty(umf_memory_tracker_handle_t hTracker, while (1 == critnib_find(hTracker->alloc_segments_map[i], last_key, FIND_G, &rkey, (void **)&rvalue, &ref_value)) { - if (rvalue && ((rvalue->pool == pool) || pool == NULL)) { + if (rvalue && ((rvalue->props.pool == pool) || pool == NULL)) { n_items++; LOG_DEBUG( "found abandoned allocation in the tracking provider: " "pool=%p, ptr=%p, size=%zu", - (void *)rvalue->pool, (void *)rkey, (size_t)rvalue->size); + (void *)rvalue->props.pool, (void *)rkey, + (size_t)rvalue->props.base_size); } if (ref_value) { @@ -1037,6 +1060,11 @@ static umf_result_t trackingPurgeForce(void *provider, void *ptr, size_t size) { static umf_result_t trackingName(void *provider, const char **name) { umf_tracking_memory_provider_t *p = (umf_tracking_memory_provider_t *)provider; + // if ops->get_name is called with null provider it must return default provider name + if (!p) { + *name = "tracking"; + return UMF_RESULT_SUCCESS; + } return umfMemoryProviderGetName(p->hUpstream, name); } @@ -1295,6 +1323,24 @@ static umf_result_t 
trackingCloseIpcHandle(void *provider, void *ptr, return umf_result; } +static umf_result_t +trackingGetAllocationProperties(void *provider, const void *ptr, + umf_memory_property_id_t memory_property_id, + void *value) { + umf_tracking_memory_provider_t *p = + (umf_tracking_memory_provider_t *)provider; + return umfMemoryProviderGetAllocationProperties(p->hUpstream, ptr, + memory_property_id, value); +} + +static umf_result_t trackingGetAllocationPropertiesSize( + void *provider, umf_memory_property_id_t memory_property_id, size_t *size) { + umf_tracking_memory_provider_t *p = + (umf_tracking_memory_provider_t *)provider; + return umfMemoryProviderGetAllocationPropertiesSize( + p->hUpstream, memory_property_id, size); +} + umf_memory_provider_ops_t UMF_TRACKING_MEMORY_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = trackingInitialize, @@ -1313,7 +1359,10 @@ umf_memory_provider_ops_t UMF_TRACKING_MEMORY_PROVIDER_OPS = { .ext_get_ipc_handle = trackingGetIpcHandle, .ext_put_ipc_handle = trackingPutIpcHandle, .ext_open_ipc_handle = trackingOpenIpcHandle, - .ext_close_ipc_handle = trackingCloseIpcHandle}; + .ext_close_ipc_handle = trackingCloseIpcHandle, + .ext_get_allocation_properties = trackingGetAllocationProperties, + .ext_get_allocation_properties_size = trackingGetAllocationPropertiesSize, +}; static void free_ipc_cache_value(void *unused, void *ipc_cache_value) { (void)unused; @@ -1482,3 +1531,26 @@ void umfMemoryTrackerDestroy(umf_memory_tracker_handle_t handle) { handle->ipc_info_allocator = NULL; umf_ba_global_free(handle); } + +umf_result_t umfMemoryTrackerIterateAll(int (*func)(uintptr_t key, void *value, + void *privdata), + void *privdata) { + if (UNLIKELY(TRACKER == NULL)) { + LOG_ERR("tracker does not exist"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + + if (UNLIKELY(TRACKER->alloc_segments_map[0] == NULL)) { + LOG_ERR("tracker's alloc_segments_map does not exist"); + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } + + for (int level = 0; level < MAX_LEVELS_OF_ALLOC_SEGMENT_MAP; level++) { + critnib *alloc_segment = TRACKER->alloc_segments_map[level]; + LOG_DEBUG("iterating tracker's %d segment: %p", level, + (void *)alloc_segment); + critnib_iter_all(alloc_segment, func, privdata); + } + + return UMF_RESULT_SUCCESS; +} diff --git a/src/provider/provider_tracking.h b/src/provider/provider_tracking.h index d7ee06c1bb..254bbf6786 100644 --- a/src/provider/provider_tracking.h +++ b/src/provider/provider_tracking.h @@ -20,6 +20,7 @@ #include "base_alloc.h" #include "critnib.h" +#include "memory_properties_internal.h" #include "utils_concurrency.h" #ifdef __cplusplus @@ -34,18 +35,23 @@ extern umf_memory_tracker_handle_t TRACKER; umf_result_t umfMemoryTrackerCreate(umf_memory_tracker_handle_t *handle); void umfMemoryTrackerDestroy(umf_memory_tracker_handle_t handle); -umf_memory_pool_handle_t umfMemoryTrackerGetPool(const void *ptr); +typedef struct tracker_alloc_info_t { + umf_memory_properties_t props; -typedef struct umf_alloc_info_t { - void *base; - size_t baseSize; - umf_memory_pool_handle_t pool; -} umf_alloc_info_t; + // number of overlapping memory regions in the next level of map falling + // within the current range + size_t n_children; +#if !defined(NDEBUG) && defined(UMF_DEVELOPER_MODE) + uint64_t is_freed; +#endif +} tracker_alloc_info_t; umf_result_t umfMemoryTrackerGetAllocInfo(const void *ptr, - umf_alloc_info_t *pAllocInfo); + tracker_alloc_info_t **info); typedef struct umf_ipc_info_t { + umf_memory_properties_handle_t props; + void 
*base; size_t baseSize; umf_memory_provider_handle_t provider; @@ -64,6 +70,10 @@ void umfTrackingMemoryProviderGetUpstreamProvider( umf_memory_provider_handle_t hTrackingProvider, umf_memory_provider_handle_t *hUpstream); +umf_result_t umfMemoryTrackerIterateAll(int (*func)(uintptr_t key, void *value, + void *privdata), + void *privdata); + #ifdef __cplusplus } #endif diff --git a/src/utils/CMakeLists.txt b/src/utils/CMakeLists.txt index 7125d2603b..e26017e79d 100644 --- a/src/utils/CMakeLists.txt +++ b/src/utils/CMakeLists.txt @@ -5,7 +5,8 @@ include(${UMF_CMAKE_SOURCE_DIR}/cmake/helpers.cmake) include(FindThreads) -set(UMF_UTILS_SOURCES_COMMON utils_common.c utils_log.c utils_load_library.c) +set(UMF_UTILS_SOURCES_COMMON utils_common.c utils_log.c utils_load_library.c + ../ctl/ctl.c) set(UMF_UTILS_SOURCES_POSIX utils_posix_common.c utils_posix_concurrency.c) set(UMF_UTILS_SOURCES_LINUX utils_linux_common.c) set(UMF_UTILS_SOURCES_MACOSX utils_macosx_common.c) diff --git a/src/utils/utils_concurrency.h b/src/utils/utils_concurrency.h index a00b8bc405..74c7fc8ae1 100644 --- a/src/utils/utils_concurrency.h +++ b/src/utils/utils_concurrency.h @@ -71,12 +71,12 @@ typedef struct utils_rwlock_t { #endif } utils_rwlock_t; -utils_rwlock_t *utils_rwlock_init(utils_rwlock_t *ptr); +int utils_rwlock_init(utils_rwlock_t *ptr); void utils_rwlock_destroy_not_free(utils_rwlock_t *rwlock); -int utils_read_lock(utils_rwlock_t *rwlock); -int utils_write_lock(utils_rwlock_t *rwlock); -int utils_read_unlock(utils_rwlock_t *rwlock); -int utils_write_unlock(utils_rwlock_t *rwlock); +void utils_read_lock(utils_rwlock_t *rwlock); +void utils_write_lock(utils_rwlock_t *rwlock); +void utils_read_unlock(utils_rwlock_t *rwlock); +void utils_write_unlock(utils_rwlock_t *rwlock); #if defined(_WIN32) #define UTIL_ONCE_FLAG INIT_ONCE diff --git a/src/utils/utils_level_zero.cpp b/src/utils/utils_level_zero.cpp index 6daab3e691..ee7aa85eae 100644 --- a/src/utils/utils_level_zero.cpp +++ b/src/utils/utils_level_zero.cpp @@ -13,8 +13,6 @@ #include "utils_concurrency.h" #include "utils_load_library.h" -#include "ze_api.h" - struct libze_ops { ze_result_t (*zeInit)(ze_init_flags_t flags); ze_result_t (*zeDriverGet)(uint32_t *pCount, ze_driver_handle_t *phDrivers); diff --git a/src/utils/utils_log.c b/src/utils/utils_log.c index 960ae46865..73836cd9e4 100644 --- a/src/utils/utils_log.c +++ b/src/utils/utils_log.c @@ -10,7 +10,6 @@ #ifdef _WIN32 #include #else -#define _GNU_SOURCE 1 #include #include #include @@ -28,6 +27,7 @@ #include +#include "ctl/ctl_internal.h" #include "utils_assert.h" #include "utils_common.h" #include "utils_log.h" @@ -61,14 +61,16 @@ char const __umf_str_1__all_cmake_vars[] = #define MAX_ENV_LEN 2048 typedef struct { - int timestamp; - int pid; + bool enableTimestamp; + bool enablePid; utils_log_level_t level; utils_log_level_t flushLevel; FILE *output; + char file_name[MAX_FILE_PATH]; } utils_log_config_t; -utils_log_config_t loggerConfig = {0, 0, LOG_ERROR, LOG_ERROR, NULL}; +utils_log_config_t loggerConfig = {false, false, LOG_ERROR, + LOG_ERROR, NULL, ""}; static const char *level_to_str(utils_log_level_t l) { switch (l) { @@ -97,8 +99,8 @@ static const char *level_to_str(utils_log_level_t l) { #endif // _MSC_VER static void utils_log_internal(utils_log_level_t level, int perror, - const char *func, const char *format, - va_list args) { + const char *fileline, const char *func, + const char *format, va_list args) { if (!loggerConfig.output && level != LOG_FATAL) { return; //logger not enabled } @@ 
-113,7 +115,12 @@ static void utils_log_internal(utils_log_level_t level, int perror, char *b_pos = buffer; int b_size = sizeof(buffer); - int tmp = snprintf(b_pos, b_size, "%s: ", func); + int tmp = 0; + if (fileline == NULL) { + tmp = snprintf(b_pos, b_size, "%s: ", func); + } else { + tmp = snprintf(b_pos, b_size, "%s %s: ", fileline, func); + } ASSERT(tmp > 0); b_pos += (int)tmp; @@ -138,28 +145,29 @@ static void utils_log_internal(utils_log_level_t level, int perror, *err = '\0'; postfix = "[strerror_s failed]"; } -#elif defined(__APPLE__) - char err[1024]; // max size according to manpage. +#else int saveno = errno; errno = 0; - if (strerror_r(saveno, err, sizeof(err))) { - /* should never happen */ + +#if defined(__APPLE__) || \ + ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) + char err[1024]; + int ret = strerror_r(saveno, err, sizeof(err)); + if (ret) { *err = '\0'; postfix = "[strerror_r failed]"; } - if (errno == ERANGE) { postfix = "[truncated...]"; } - errno = saveno; #else - char err_buff[1024]; // max size according to manpage. - int saveno = errno; - errno = 0; + char err_buff[1024]; const char *err = strerror_r(saveno, err_buff, sizeof(err_buff)); if (errno == ERANGE) { postfix = "[truncated...]"; } +#endif + errno = saveno; #endif strncpy(b_pos, err, b_size); @@ -185,7 +193,7 @@ static void utils_log_internal(utils_log_level_t level, int perror, int h_size = sizeof(header); memset(header, 0, sizeof(header)); - if (loggerConfig.timestamp) { + if (loggerConfig.enableTimestamp) { time_t now = time(NULL); struct tm tm_info; #ifdef _WIN32 @@ -200,7 +208,7 @@ static void utils_log_internal(utils_log_level_t level, int perror, h_size -= tmp; } - if (loggerConfig.pid) { + if (loggerConfig.enablePid) { ASSERT(h_size > 0); tmp = snprintf(h_pos, h_size, "PID:%-6lu TID:%-6lu ", (unsigned long)pid, (unsigned long)tid); @@ -226,19 +234,19 @@ static void utils_log_internal(utils_log_level_t level, int perror, #pragma warning(pop) #endif // _MSC_VER -void utils_log(utils_log_level_t level, const char *func, const char *format, - ...) { +void utils_log(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...) { va_list args; va_start(args, format); - utils_log_internal(level, 0, func, format, args); + utils_log_internal(level, 0, fileline, func, format, args); va_end(args); } -void utils_plog(utils_log_level_t level, const char *func, const char *format, - ...) { +void utils_plog(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...) 
{ va_list args; va_start(args, format); - utils_log_internal(level, 1, func, format, args); + utils_log_internal(level, 1, fileline, func, format, args); va_end(args); } @@ -254,8 +262,10 @@ void utils_log_init(void) { const char *arg; if (utils_parse_var(envVar, "output:stdout", NULL)) { loggerConfig.output = stdout; + strncpy(loggerConfig.file_name, "stdout", MAX_FILE_PATH); } else if (utils_parse_var(envVar, "output:stderr", NULL)) { loggerConfig.output = stderr; + strncpy(loggerConfig.file_name, "stderr", MAX_FILE_PATH); } else if (utils_parse_var(envVar, "output:file", &arg)) { loggerConfig.output = NULL; const char *argEnd = strstr(arg, ";"); @@ -284,6 +294,9 @@ void utils_log_init(void) { loggerConfig.output = NULL; return; } + strncpy(loggerConfig.file_name, file, MAX_FILE_PATH - 1); + loggerConfig.file_name[MAX_FILE_PATH - 1] = + '\0'; // ensure null-termination } else { loggerConfig.output = stderr; LOG_ERR("Logging output not set - logging disabled (UMF_LOG = \"%s\")", @@ -293,15 +306,15 @@ void utils_log_init(void) { } if (utils_parse_var(envVar, "timestamp:yes", NULL)) { - loggerConfig.timestamp = 1; + loggerConfig.enableTimestamp = 1; } else if (utils_parse_var(envVar, "timestamp:no", NULL)) { - loggerConfig.timestamp = 0; + loggerConfig.enableTimestamp = 0; } if (utils_parse_var(envVar, "pid:yes", NULL)) { - loggerConfig.pid = 1; + loggerConfig.enablePid = 1; } else if (utils_parse_var(envVar, "pid:no", NULL)) { - loggerConfig.pid = 0; + loggerConfig.enablePid = 0; } if (utils_parse_var(envVar, "level:debug", NULL)) { @@ -328,9 +341,267 @@ void utils_log_init(void) { loggerConfig.flushLevel = LOG_FATAL; } - LOG_INFO( - "Logger enabled (" LOG_STR_UMF_VERSION - "level: %s, flush: %s, pid: %s, timestamp: %s)", - level_to_str(loggerConfig.level), level_to_str(loggerConfig.flushLevel), - bool_to_str(loggerConfig.pid), bool_to_str(loggerConfig.timestamp)); + LOG_INFO("Logger enabled (" LOG_STR_UMF_VERSION + "level: %s, flush: %s, pid: %s, timestamp: %s)", + level_to_str(loggerConfig.level), + level_to_str(loggerConfig.flushLevel), + bool_to_str(loggerConfig.enablePid), + bool_to_str(loggerConfig.enableTimestamp)); +} + +// this is needed for logger unit test +#ifndef DISABLE_CTL_LOGGER +static umf_result_t +CTL_READ_HANDLER(timestamp)(void *ctx, umf_ctl_query_source_t source, void *arg, + size_t size, umf_ctl_index_utlist_t *indexes) { + /* suppress unused-parameter errors */ + (void)source, (void)indexes, (void)ctx; + + bool *arg_out = (bool *)arg; + + if (arg_out == NULL || size < sizeof(bool)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + *arg_out = loggerConfig.enableTimestamp; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t +CTL_WRITE_HANDLER(timestamp)(void *ctx, umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + /* suppress unused-parameter errors */ + (void)source, (void)indexes, (void)ctx; + + bool arg_in = *(bool *)arg; + + if (size < sizeof(bool)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + loggerConfig.enableTimestamp = arg_in; + LOG_INFO("Logger print timestamp set to %s", + bool_to_str(loggerConfig.enableTimestamp)); + return UMF_RESULT_SUCCESS; +} + +static umf_result_t CTL_READ_HANDLER(pid)(void *ctx, + umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + /* suppress unused-parameter errors */ + (void)source, (void)indexes, (void)ctx; + + bool *arg_out = (bool *)arg; + + if (arg_out == NULL || size < sizeof(bool)) { + return 
UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    *arg_out = loggerConfig.enablePid;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_WRITE_HANDLER(pid)(void *ctx,
+                                           umf_ctl_query_source_t source,
+                                           void *arg, size_t size,
+                                           umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    bool arg_in = *(bool *)arg;
+
+    if (size < sizeof(bool)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    loggerConfig.enablePid = arg_in;
+    LOG_INFO("Logger print pid set to %s",
+             bool_to_str(loggerConfig.enablePid));
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_READ_HANDLER(level)(void *ctx,
+                                            umf_ctl_query_source_t source,
+                                            void *arg, size_t size,
+                                            umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    utils_log_level_t *arg_out = (utils_log_level_t *)arg;
+
+    if (arg_out == NULL || size < sizeof(utils_log_level_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    *arg_out = loggerConfig.level;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_WRITE_HANDLER(level)(void *ctx,
+                                             umf_ctl_query_source_t source,
+                                             void *arg, size_t size,
+                                             umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    utils_log_level_t *arg_in = (utils_log_level_t *)arg;
+
+    if (arg_in == NULL || *arg_in < LOG_DEBUG || *arg_in > LOG_FATAL ||
+        size < sizeof(int)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    utils_log_level_t old = loggerConfig.level;
+
+    // if the new log level is higher than LOG_INFO, print the log before
+    // changing the level, so a user switching from LOG_INFO to a higher
+    // level is still informed about the change
+    if (*arg_in > LOG_INFO) {
+        LOG_INFO("Logger level changed from %s to %s", level_to_str(old),
+                 level_to_str(*arg_in));
+        loggerConfig.level = *arg_in;
+    } else {
+        loggerConfig.level = *arg_in;
+        LOG_INFO("Logger level changed from %s to %s", level_to_str(old),
+                 level_to_str(loggerConfig.level));
+    }
+
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t
+CTL_READ_HANDLER(flush_level)(void *ctx, umf_ctl_query_source_t source,
+                              void *arg, size_t size,
+                              umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    utils_log_level_t *arg_out = (utils_log_level_t *)arg;
+
+    if (arg_out == NULL || size < sizeof(utils_log_level_t)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    *arg_out = loggerConfig.flushLevel;
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t
+CTL_WRITE_HANDLER(flush_level)(void *ctx, umf_ctl_query_source_t source,
+                               void *arg, size_t size,
+                               umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    utils_log_level_t *arg_in = (utils_log_level_t *)arg;
+
+    if (arg_in == NULL || *arg_in < LOG_DEBUG || *arg_in > LOG_FATAL ||
+        size < sizeof(int)) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    loggerConfig.flushLevel = *arg_in;
+    LOG_INFO("Logger flush level set to %s",
+             level_to_str(loggerConfig.flushLevel));
+    return UMF_RESULT_SUCCESS;
+}
+
+static umf_result_t CTL_READ_HANDLER(output)(void *ctx,
+                                             umf_ctl_query_source_t source,
+                                             void *arg, size_t size,
+                                             umf_ctl_index_utlist_t *indexes) {
+    /* suppress unused-parameter errors */
+    (void)source, (void)indexes, (void)ctx;
+
+    char *arg_out = (char *)arg;
+    if (arg_out == NULL) {
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    if (loggerConfig.output == NULL) {
+        const char
disabled[] = "disabled"; + if (size < sizeof(disabled)) { + LOG_ERR("Invalid output argument size: %zu, expected at least %zu", + size, sizeof(disabled)); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(arg_out, disabled, size); + return UMF_RESULT_SUCCESS; + } + if (size < strlen(loggerConfig.file_name)) { + LOG_ERR("Invalid output argument size: %zu, expected at least %zu", + size, strlen(loggerConfig.file_name)); + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + strncpy(arg_out, loggerConfig.file_name, size); + return UMF_RESULT_SUCCESS; } + +static umf_result_t CTL_WRITE_HANDLER(output)(void *ctx, + umf_ctl_query_source_t source, + void *arg, size_t size, + umf_ctl_index_utlist_t *indexes) { + /* suppress unused-parameter errors */ + (void)source, (void)indexes, (void)ctx, (void)size; + + const char *arg_in = (const char *)arg; + + FILE *oldHandle = loggerConfig.output; + const char *oldName = + *loggerConfig.file_name == '\0' ? loggerConfig.file_name : "disabled"; + + if (arg_in == NULL) { + if (loggerConfig.output) { + LOG_INFO("Logger disabled"); + if (oldHandle != stdout && oldHandle != stderr) { + fclose(oldHandle); + } + loggerConfig.output = NULL; + loggerConfig.file_name[0] = '\0'; + } + return UMF_RESULT_SUCCESS; + } + + FILE *newHandle = NULL; + + if (strcmp(arg_in, "stdout") == 0) { + newHandle = stdout; + strncpy(loggerConfig.file_name, "stdout", MAX_FILE_PATH); + } else if (strcmp(arg_in, "stderr") == 0) { + newHandle = stderr; + strncpy(loggerConfig.file_name, "stderr", MAX_FILE_PATH); + } else { + newHandle = fopen(arg_in, "a"); + if (!newHandle) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + strncpy(loggerConfig.file_name, arg_in, MAX_FILE_PATH - 1); + loggerConfig.file_name[MAX_FILE_PATH - 1] = + '\0'; // ensure null-termination + } + + loggerConfig.output = newHandle; + LOG_INFO("Logger output changed from %s to %s", oldName, + loggerConfig.file_name); + + if (oldHandle && oldHandle != stdout && oldHandle != stderr) { + fclose(oldHandle); + } + + return UMF_RESULT_SUCCESS; +} + +static const struct ctl_argument CTL_ARG(timestamp) = CTL_ARG_BOOLEAN; +static const struct ctl_argument CTL_ARG(pid) = CTL_ARG_BOOLEAN; +static const struct ctl_argument CTL_ARG(level) = CTL_ARG_INT; +static const struct ctl_argument CTL_ARG(flush_level) = CTL_ARG_INT; +static const struct ctl_argument + CTL_ARG(output) = CTL_ARG_STRING(MAX_FILE_PATH); + +const umf_ctl_node_t CTL_NODE(logger)[] = { + CTL_LEAF_RW(timestamp), CTL_LEAF_RW(pid), CTL_LEAF_RW(level), + CTL_LEAF_RW(flush_level), CTL_LEAF_RW(output), CTL_NODE_END, +}; +#endif diff --git a/src/utils/utils_log.h b/src/utils/utils_log.h index ab40121cec..15fdabec66 100644 --- a/src/utils/utils_log.h +++ b/src/utils/utils_log.h @@ -1,6 +1,6 @@ /* * - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation * * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -14,6 +14,8 @@ extern "C" { #endif +#include "ctl/ctl_internal.h" + typedef enum { LOG_DEBUG, LOG_INFO, @@ -22,31 +24,51 @@ typedef enum { LOG_FATAL } utils_log_level_t; -#define LOG_DEBUG(...) utils_log(LOG_DEBUG, __func__, __VA_ARGS__); -#define LOG_INFO(...) utils_log(LOG_INFO, __func__, __VA_ARGS__); -#define LOG_WARN(...) utils_log(LOG_WARNING, __func__, __VA_ARGS__); -#define LOG_ERR(...) utils_log(LOG_ERROR, __func__, __VA_ARGS__); -#define LOG_FATAL(...) 
utils_log(LOG_FATAL, __func__, __VA_ARGS__); +#ifdef UMF_DEVELOPER_MODE +#define UMF_STRINGIFY(x) #x +#define UMF_TOSTRING(x) UMF_STRINGIFY(x) +#define UMF_FILELINE_DESC() __FILE__ ":" UMF_TOSTRING(__LINE__) +#else +#define UMF_FILELINE_DESC() NULL +#endif -#define LOG_PDEBUG(...) utils_plog(LOG_DEBUG, __func__, __VA_ARGS__); -#define LOG_PINFO(...) utils_plog(LOG_INFO, __func__, __VA_ARGS__); -#define LOG_PWARN(...) utils_plog(LOG_WARNING, __func__, __VA_ARGS__); -#define LOG_PERR(...) utils_plog(LOG_ERROR, __func__, __VA_ARGS__); -#define LOG_PFATAL(...) utils_plog(LOG_FATAL, __func__, __VA_ARGS__); +#define LOG_DEBUG(...) \ + utils_log(LOG_DEBUG, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_INFO(...) \ + utils_log(LOG_INFO, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_WARN(...) \ + utils_log(LOG_WARNING, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_ERR(...) \ + utils_log(LOG_ERROR, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_FATAL(...) \ + utils_log(LOG_FATAL, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); + +#define LOG_PDEBUG(...) \ + utils_plog(LOG_DEBUG, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_PINFO(...) \ + utils_plog(LOG_INFO, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_PWARN(...) \ + utils_plog(LOG_WARNING, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_PERR(...) \ + utils_plog(LOG_ERROR, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); +#define LOG_PFATAL(...) \ + utils_plog(LOG_FATAL, UMF_FILELINE_DESC(), __func__, __VA_ARGS__); void utils_log_init(void); #ifdef _WIN32 -void utils_log(utils_log_level_t level, const char *func, const char *format, - ...); -void utils_plog(utils_log_level_t level, const char *func, const char *format, - ...); +void utils_log(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...); +void utils_plog(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...); #else -void utils_log(utils_log_level_t level, const char *func, const char *format, - ...) __attribute__((format(printf, 3, 4))); -void utils_plog(utils_log_level_t level, const char *func, const char *format, - ...) __attribute__((format(printf, 3, 4))); +void utils_log(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...) __attribute__((format(printf, 4, 5))); +void utils_plog(utils_log_level_t level, const char *fileline, const char *func, + const char *format, ...) __attribute__((format(printf, 4, 5))); #endif +extern const umf_ctl_node_t CTL_NODE(logger)[]; + #ifdef __cplusplus } #endif diff --git a/src/utils/utils_name.h b/src/utils/utils_name.h new file mode 100644 index 0000000000..6914d06ba3 --- /dev/null +++ b/src/utils/utils_name.h @@ -0,0 +1,46 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#ifndef UMF_UTILS_NAME_H +#define UMF_UTILS_NAME_H + +#include +#include + +#include "utils_log.h" + +#define MAX_NAME 64 + +static inline int utils_name_is_valid(const char *name) { + if (!name) { + return 0; + } + size_t len = strlen(name); + if (len > MAX_NAME) { + return 0; + } + for (size_t i = 0; i < len; ++i) { + char c = name[i]; + if (!isalnum((unsigned char)c) && c != '-' && c != '_') { + return 0; + } + } + return 1; +} + +static inline void utils_warn_invalid_name(const char *kind, const char *name) { + if (!utils_name_is_valid(name)) { + LOG_WARN("%s name \"%s\" is deprecated. It should be no more than 64 " + "characters including null character, containing only " + "alphanumerics, '_' or '-'. CTL functionality may be limited.", + kind, name); + } +} + +#endif /* UMF_UTILS_NAME_H */ diff --git a/src/utils/utils_posix_concurrency.c b/src/utils/utils_posix_concurrency.c index 44a3173611..0149263df3 100644 --- a/src/utils/utils_posix_concurrency.c +++ b/src/utils/utils_posix_concurrency.c @@ -38,35 +38,51 @@ int utils_mutex_unlock(utils_mutex_t *m) { } void utils_init_once(UTIL_ONCE_FLAG *flag, void (*oneCb)(void)) { + if (oneCb == NULL) { + LOG_FATAL("utils_init_once: callback is NULL"); + return; + } + pthread_once(flag, oneCb); } -utils_rwlock_t *utils_rwlock_init(utils_rwlock_t *ptr) { +int utils_rwlock_init(utils_rwlock_t *ptr) { pthread_rwlock_t *rwlock = (pthread_rwlock_t *)ptr; - int ret = pthread_rwlock_init(rwlock, NULL); - return ret == 0 ? ((utils_rwlock_t *)rwlock) : NULL; + return pthread_rwlock_init(rwlock, NULL); } void utils_rwlock_destroy_not_free(utils_rwlock_t *ptr) { pthread_rwlock_t *rwlock = (pthread_rwlock_t *)ptr; - int ret = pthread_rwlock_destroy(rwlock); - if (ret) { - LOG_ERR("pthread_rwlock_destroy failed"); + if (pthread_rwlock_destroy(rwlock) != 0) { + LOG_FATAL("pthread_rwlock_destroy failed"); + abort(); } } -int utils_read_lock(utils_rwlock_t *rwlock) { - return pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock); +void utils_read_lock(utils_rwlock_t *rwlock) { + if (pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock) != 0) { + LOG_FATAL("pthread_rwlock_rdlock failed"); + abort(); + } } -int utils_write_lock(utils_rwlock_t *rwlock) { - return pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock); +void utils_write_lock(utils_rwlock_t *rwlock) { + if (pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock) != 0) { + LOG_FATAL("pthread_rwlock_wrlock failed"); + abort(); + } } -int utils_read_unlock(utils_rwlock_t *rwlock) { - return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock); +void utils_read_unlock(utils_rwlock_t *rwlock) { + if (pthread_rwlock_unlock((pthread_rwlock_t *)rwlock) != 0) { + LOG_FATAL("pthread_rwlock_unlock failed"); + abort(); + } } -int utils_write_unlock(utils_rwlock_t *rwlock) { - return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock); +void utils_write_unlock(utils_rwlock_t *rwlock) { + if (pthread_rwlock_unlock((pthread_rwlock_t *)rwlock) != 0) { + LOG_FATAL("pthread_rwlock_unlock failed"); + abort(); + } } diff --git a/src/utils/utils_windows_common.c b/src/utils/utils_windows_common.c index 7aa8f7684d..4e1c63bd44 100644 --- a/src/utils/utils_windows_common.c +++ b/src/utils/utils_windows_common.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -47,10 +48,7 @@ int utils_getpid(void) { return GetCurrentProcessId(); } int utils_gettid(void) { return GetCurrentThreadId(); } -int utils_close_fd(int fd) { - (void)fd; // unused - return 
-1; -} +int utils_close_fd(int fd) { return CloseHandle((HANDLE)(uintptr_t)fd); } umf_result_t utils_errno_to_umf_result(int err) { (void)err; // unused @@ -58,10 +56,41 @@ umf_result_t utils_errno_to_umf_result(int err) { } umf_result_t utils_duplicate_fd(int pid, int fd_in, int *fd_out) { - (void)pid; // unused - (void)fd_in; // unused - (void)fd_out; // unused - return UMF_RESULT_ERROR_NOT_SUPPORTED; + umf_result_t ret = UMF_RESULT_SUCCESS; + HANDLE current_process_handle = GetCurrentProcess(); + if (!current_process_handle) { + LOG_ERR("GetCurrentProcess() failed."); + return UMF_RESULT_ERROR_UNKNOWN; + } + + HANDLE source_process_handle = OpenProcess(PROCESS_DUP_HANDLE, FALSE, pid); + if (!source_process_handle) { + LOG_ERR("OpenProcess() failed for pid=%d.", pid); + ret = UMF_RESULT_ERROR_UNKNOWN; + goto release_current; + } + + HANDLE handle_in = (HANDLE)(uintptr_t)fd_in; + HANDLE handle_out = NULL; + BOOL result = DuplicateHandle(source_process_handle, handle_in, + current_process_handle, &handle_out, + GENERIC_READ | GENERIC_WRITE, FALSE, 0); + if (!result) { + LOG_ERR("DuplicateHandle() failed for pid=%d fd_in=%d handle_in=%p", + pid, fd_in, handle_in); + ret = UMF_RESULT_ERROR_UNKNOWN; + goto release_source; + } + + *fd_out = (int)(uintptr_t)handle_out; + +release_source: + CloseHandle(source_process_handle); + +release_current: + CloseHandle(current_process_handle); + + return ret; } umf_result_t utils_translate_mem_protection_flags(unsigned in_protection, diff --git a/src/utils/utils_windows_concurrency.c b/src/utils/utils_windows_concurrency.c index faa302be36..645d87b106 100644 --- a/src/utils/utils_windows_concurrency.c +++ b/src/utils/utils_windows_concurrency.c @@ -36,7 +36,7 @@ int utils_mutex_unlock(utils_mutex_t *mutex) { return 0; } -utils_rwlock_t *utils_rwlock_init(utils_rwlock_t *rwlock) { +int utils_rwlock_init(utils_rwlock_t *rwlock) { InitializeSRWLock(&rwlock->lock); return 0; // never fails } @@ -46,24 +46,20 @@ void utils_rwlock_destroy_not_free(utils_rwlock_t *rwlock) { (void)rwlock; } -int utils_read_lock(utils_rwlock_t *rwlock) { - AcquireSRWLockShared(&rwlock->lock); - return 0; // never fails +void utils_read_lock(utils_rwlock_t *rwlock) { + AcquireSRWLockShared(&rwlock->lock); // never fails } -int utils_write_lock(utils_rwlock_t *rwlock) { - AcquireSRWLockExclusive(&rwlock->lock); - return 0; // never fails +void utils_write_lock(utils_rwlock_t *rwlock) { + AcquireSRWLockExclusive(&rwlock->lock); // never fails } -int utils_read_unlock(utils_rwlock_t *rwlock) { - ReleaseSRWLockShared(&rwlock->lock); - return 0; // never fails +void utils_read_unlock(utils_rwlock_t *rwlock) { + ReleaseSRWLockShared(&rwlock->lock); // never fails } -int utils_write_unlock(utils_rwlock_t *rwlock) { - ReleaseSRWLockExclusive(&rwlock->lock); - return 0; // never fails +void utils_write_unlock(utils_rwlock_t *rwlock) { + ReleaseSRWLockExclusive(&rwlock->lock); // never fails } static BOOL CALLBACK initOnceCb(PINIT_ONCE InitOnce, PVOID Parameter, diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index d4e027e47e..b6493b8582 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -11,25 +11,33 @@ if(CMAKE_C_COMPILER_ID STREQUAL "IntelLLVM") add_link_options(-static-intel) endif() -include(FetchContent) -FetchContent_Declare( - googletest - GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG v1.15.2) - -# For Windows: Prevent overriding the parent project's compiler/linker settings -set(gtest_force_shared_crt - ON - CACHE BOOL "" FORCE) 
-set(INSTALL_GTEST - OFF - CACHE BOOL "" FORCE) -FetchContent_MakeAvailable(googletest) +set(GTEST_VER 1.15.2) + +find_package(GTest ${GTEST_VER} QUIET) + +if(NOT GTest_FOUND) + include(FetchContent) + FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG v${GTEST_VER}) + + # For Windows: Prevent overriding the parent project's compiler/linker + # settings + set(gtest_force_shared_crt + ON + CACHE BOOL "" FORCE) + set(INSTALL_GTEST + OFF + CACHE BOOL "" FORCE) + FetchContent_MakeAvailable(googletest) +endif() enable_testing() set(UMF_TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -set(UMF_UTILS_DIR ${UMF_CMAKE_SOURCE_DIR}/src/utils) -set(UMF_BA_DIR ${UMF_CMAKE_SOURCE_DIR}/src/base_alloc) +set(UMF_SRC_DIR ${UMF_CMAKE_SOURCE_DIR}/src) +set(UMF_UTILS_DIR ${UMF_SRC_DIR}/utils) +set(UMF_BA_DIR ${UMF_SRC_DIR}/base_alloc) function(build_umf_test) # Parameters: @@ -50,6 +58,7 @@ function(build_umf_test) set(TEST_TARGET_NAME test_${ARG_NAME}) set(LIB_DIRS ${LIB_DIRS} ${LIBHWLOC_LIBRARY_DIRS}) + set(INC_DIRS ${INC_DIRS} ${LIBHWLOC_INCLUDE_DIRS}) if(UMF_CUDA_ENABLED) set(INC_DIRS ${INC_DIRS} ${CUDA_INCLUDE_DIRS}) @@ -61,10 +70,6 @@ function(build_umf_test) set(LIB_DIRS ${LIB_DIRS} ${ZE_LOADER_LIBRARY_DIRS}) endif() - if(NOT UMF_DISABLE_HWLOC) - set(INC_DIRS ${INC_DIRS} ${LIBHWLOC_INCLUDE_DIRS}) - endif() - if(UMF_POOL_JEMALLOC_ENABLED) set(CPL_DEFS ${CPL_DEFS} UMF_POOL_JEMALLOC_ENABLED=1) endif() @@ -144,8 +149,9 @@ function(add_umf_test) if(WINDOWS) # add PATH to DLL on Windows set(DLL_PATH_LIST - "${DLL_PATH_LIST};PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/;PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/$/" - ) + "${DLL_PATH_LIST};" + "PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/;" + "PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/$/") # append PATH to DLLs NOTE: this would work only for the CMake ver >= # # 3.22. 
For the older versions, the PATH variable should be set in the @@ -155,6 +161,22 @@ function(add_umf_test) endif() endfunction() +function(add_umf_mocked_test test_name test_source_file) + add_umf_test( + NAME ${test_name} + SRCS ${test_source_file} ${UMF_UTILS_DIR}/utils_level_zero.cpp + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST} umf_test_mocks + GTest::gmock) + + set_tests_properties( + test_${test_name} PROPERTIES + ENVIRONMENT + "UMF_LOG=level:debug\\\\\\\\;flush:debug\\\\\\\\;output:stdout;UMF_ZE_LOADER_LIB_NAME=$" + ) + target_compile_definitions(test_${test_name} PUBLIC USE_DLOPEN=1) + add_dependencies(test_${test_name} umf_ze_loopback) +endfunction() + add_subdirectory(common) if(UMF_BUILD_SHARED_LIBRARY) @@ -195,10 +217,12 @@ add_umf_test( SRCS memoryProviderAPI.cpp LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) -add_umf_test( - NAME logger - SRCS utils/utils_log.cpp ${UMF_UTILS_SOURCES} - LIBS ${UMF_LOGGER_LIBS}) +if(NOT UMF_BUILD_SHARED_LIBRARY) + add_umf_test( + NAME logger + SRCS utils/utils_log.cpp ${UMF_UTILS_SOURCES} ../src/ctl/ctl.c + LIBS ${UMF_LOGGER_LIBS} ${UMF_BA_FOR_TEST}) +endif() add_umf_test( NAME ctl_unittest @@ -208,7 +232,7 @@ add_umf_test( add_umf_test( NAME ctl_api SRCS ctl/ctl_api.cpp - LIBS ${UMF_UTILS_FOR_TEST}) + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) add_umf_test( NAME utils_common @@ -222,6 +246,28 @@ if(LINUX) LIBS ${UMF_UTILS_FOR_TEST}) endif() +build_umf_test( + NAME ctl_env_app + SRCS ctl/ctl_env_app.cpp + LIBS ${UMF_UTILS_FOR_TEST} umf) + +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_config1.cfg + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_config2.cfg + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/ctl/ctl_env_disjoint_pool.cfg + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ctl) + +add_umf_test( + NAME ctl_env_driver + SRCS ctl/ctl_env_driver.cpp + LIBS ${UMF_UTILS_FOR_TEST}) + +target_compile_definitions( + test_ctl_env_driver + PRIVATE CTL_ENV_APP="$" + CTL_CONF_FILE_DIR="${CMAKE_CURRENT_BINARY_DIR}/ctl") + add_umf_test( NAME coarse_lib SRCS coarse_lib.cpp @@ -248,7 +294,7 @@ add_umf_test( SRCS c_api/disjoint_pool.c LIBS ${UMF_UTILS_FOR_TEST}) -if(LINUX AND (NOT UMF_DISABLE_HWLOC)) +if(LINUX) # this test uses the file provider add_umf_test( NAME disjoint_pool_file_prov @@ -256,28 +302,26 @@ if(LINUX AND (NOT UMF_DISABLE_HWLOC)) LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) endif() -if(UMF_POOL_JEMALLOC_ENABLED - AND UMF_POOL_SCALABLE_ENABLED - AND (NOT UMF_DISABLE_HWLOC)) +if(UMF_POOL_JEMALLOC_ENABLED AND UMF_POOL_SCALABLE_ENABLED) add_umf_test(NAME c_api_multi_pool SRCS c_api/multi_pool.c) endif() -if(UMF_POOL_JEMALLOC_ENABLED AND (NOT UMF_DISABLE_HWLOC)) +if(UMF_POOL_JEMALLOC_ENABLED) add_umf_test( NAME jemalloc_pool SRCS pools/jemalloc_pool.cpp malloc_compliance_tests.cpp LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) endif() -if(UMF_POOL_SCALABLE_ENABLED AND (NOT UMF_DISABLE_HWLOC)) +if(UMF_POOL_SCALABLE_ENABLED) add_umf_test( NAME scalable_pool SRCS pools/scalable_pool.cpp malloc_compliance_tests.cpp LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) endif() -if(LINUX AND (NOT UMF_DISABLE_HWLOC)) # OS-specific functions are implemented - # only for Linux now +if(LINUX) # OS-specific functions are implemented + # only for Linux now if(PkgConfig_FOUND) pkg_check_modules(LIBNUMA numa) endif() @@ -363,6 +407,10 @@ if(LINUX AND (NOT UMF_DISABLE_HWLOC)) # OS-specific functions are implemented NAME provider_tracking_fixture_tests SRCS 
provider_tracking_fixture_tests.cpp malloc_compliance_tests.cpp LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) + add_umf_test( + NAME provider_properties + SRCS properties/provider_properties.cpp + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) # This test requires Linux-only file memory provider if(UMF_POOL_JEMALLOC_ENABLED) @@ -402,11 +450,10 @@ else() LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) endif() -if(UMF_DISABLE_HWLOC) - add_umf_test( - NAME provider_os_memory_not_impl - SRCS provider_os_memory_not_impl.cpp - LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) +if(UMF_BUILD_LEVEL_ZERO_PROVIDER) + add_umf_mocked_test(provider_level_zero_residency + providers/provider_level_zero_residency.cpp) + add_umf_mocked_test(pool_residency pools/pool_residency.cpp) endif() if(UMF_BUILD_GPU_TESTS AND UMF_LEVEL_ZERO_ENABLED) @@ -434,6 +481,12 @@ if(UMF_BUILD_GPU_TESTS AND UMF_LEVEL_ZERO_ENABLED) LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) target_compile_definitions(test_provider_level_zero_dlopen_local PUBLIC USE_DLOPEN=1 OPEN_ZE_LIBRARY_GLOBAL=0) + + add_umf_test( + NAME provider_properties_level_zero + SRCS properties/provider_properties_level_zero.cpp + ${UMF_UTILS_DIR}/utils_level_zero.cpp + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST} ze_loader) endif() if(NOT UMF_BUILD_LEVEL_ZERO_PROVIDER) @@ -466,6 +519,12 @@ if(UMF_BUILD_GPU_TESTS AND UMF_BUILD_CUDA_PROVIDER) LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST}) target_compile_definitions(test_provider_cuda_dlopen_local PUBLIC USE_DLOPEN=1 OPEN_CU_LIBRARY_GLOBAL=0) + + add_umf_test( + NAME provider_properties_cuda + SRCS properties/provider_properties_cuda.cpp + providers/cuda_helpers.cpp + LIBS ${UMF_UTILS_FOR_TEST} ${UMF_BA_FOR_TEST} cuda) else() message( STATUS @@ -555,12 +614,37 @@ function(add_umf_ipc_test) set(SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}) endif() - file(COPY ${SRC_DIR}/${ARG_TEST}.sh DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + if(WINDOWS) + set(EXT py) + else() + set(EXT sh) + endif() - add_test( - NAME ${TEST_NAME} - COMMAND ${ARG_TEST}.sh - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + file(COPY ${SRC_DIR}/${ARG_TEST}.${EXT} + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + + if(WINDOWS) + add_test( + NAME ${TEST_NAME} + COMMAND python ${ARG_TEST}.${EXT} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + else() + add_test( + NAME ${TEST_NAME} + COMMAND ${ARG_TEST}.${EXT} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + endif() + + if(WINDOWS) + set_tests_properties(${TEST_NAME} PROPERTIES + ENVIRONMENT "BUILD_TYPE=${CMAKE_BUILD_TYPE}") + # add PATH to DLL on Windows + set(DLL_PATH_LIST + "${DLL_PATH_LIST};PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/;PATH=path_list_append:${CMAKE_BINARY_DIR}/bin/$/" + ) + set_property(TEST ${TEST_NAME} PROPERTY ENVIRONMENT_MODIFICATION + "${DLL_PATH_LIST}") + endif() set_tests_properties(${TEST_NAME} PROPERTIES LABELS "umf") set_tests_properties(${TEST_NAME} PROPERTIES TIMEOUT 60) @@ -569,8 +653,44 @@ function(add_umf_ipc_test) endif() endfunction() +if(WINDOWS) + set(UMF_IPC_LIBS ws2_32) +endif() + +if(UMF_BUILD_GPU_TESTS AND UMF_LEVEL_ZERO_ENABLED) + build_umf_test( + NAME ipc_level_zero_prov_consumer + SRCS providers/ipc_level_zero_prov_consumer.c common/ipc_common.c + providers/ipc_level_zero_prov_common.c + ${UMF_UTILS_DIR}/utils_level_zero.cpp + LIBS ze_loader ${UMF_IPC_LIBS} ${UMF_UTILS_FOR_TEST}) + build_umf_test( + NAME ipc_level_zero_prov_producer + SRCS providers/ipc_level_zero_prov_producer.c common/ipc_common.c + providers/ipc_level_zero_prov_common.c + 
${UMF_UTILS_DIR}/utils_level_zero.cpp + LIBS ze_loader ${UMF_IPC_LIBS} ${UMF_UTILS_FOR_TEST}) + add_umf_ipc_test(TEST ipc_level_zero_prov SRC_DIR providers) +endif() + +if(UMF_BUILD_GPU_TESTS AND UMF_BUILD_CUDA_PROVIDER) + build_umf_test( + NAME ipc_cuda_prov_consumer + SRCS providers/ipc_cuda_prov_consumer.c common/ipc_common.c + providers/ipc_cuda_prov_common.c providers/cuda_helpers.cpp + LIBS cuda ${UMF_IPC_LIBS} ${UMF_UTILS_FOR_TEST}) + build_umf_test( + NAME ipc_cuda_prov_producer + SRCS providers/ipc_cuda_prov_producer.c common/ipc_common.c + providers/ipc_cuda_prov_common.c providers/cuda_helpers.cpp + LIBS cuda ${UMF_IPC_LIBS} ${UMF_UTILS_FOR_TEST}) + add_umf_ipc_test(TEST ipc_cuda_prov SRC_DIR providers) +endif() + +# TODO IPC tests for OS, file, devdax providers and proxy lib are supported only +# on Linux - skipping if(LINUX) - if(NOT UMF_DISABLE_HWLOC AND UMF_POOL_SCALABLE_ENABLED) + if(UMF_POOL_SCALABLE_ENABLED) build_umf_test( NAME ipc_os_prov_consumer SRCS ipc_os_prov_consumer.c common/ipc_common.c @@ -612,45 +732,16 @@ if(LINUX) add_umf_ipc_test(TEST ipc_file_prov_fsdax) endif() - # TODO add IPC tests for CUDA - - if(UMF_BUILD_GPU_TESTS AND UMF_LEVEL_ZERO_ENABLED) - build_umf_test( - NAME ipc_level_zero_prov_consumer - SRCS providers/ipc_level_zero_prov_consumer.c common/ipc_common.c - providers/ipc_level_zero_prov_common.c - ${UMF_UTILS_DIR}/utils_level_zero.cpp - LIBS ze_loader ${UMF_UTILS_FOR_TEST}) - build_umf_test( - NAME ipc_level_zero_prov_producer - SRCS providers/ipc_level_zero_prov_producer.c common/ipc_common.c - providers/ipc_level_zero_prov_common.c - ${UMF_UTILS_DIR}/utils_level_zero.cpp - LIBS ze_loader ${UMF_UTILS_FOR_TEST}) - add_umf_ipc_test(TEST ipc_level_zero_prov SRC_DIR providers) - endif() - - if(UMF_BUILD_GPU_TESTS AND UMF_BUILD_CUDA_PROVIDER) - build_umf_test( - NAME ipc_cuda_prov_consumer - SRCS providers/ipc_cuda_prov_consumer.c common/ipc_common.c - providers/ipc_cuda_prov_common.c providers/cuda_helpers.cpp - LIBS cuda ${UMF_UTILS_FOR_TEST}) - build_umf_test( - NAME ipc_cuda_prov_producer - SRCS providers/ipc_cuda_prov_producer.c common/ipc_common.c - providers/ipc_cuda_prov_common.c providers/cuda_helpers.cpp - LIBS cuda ${UMF_UTILS_FOR_TEST}) - add_umf_ipc_test(TEST ipc_cuda_prov SRC_DIR providers) - endif() else() - message(STATUS "IPC tests are supported on Linux only - skipping") + message( + STATUS + "IPC tests for OS, file, devdax providers and proxy lib are supported only on Linux - skipping" + ) endif() if(LINUX AND UMF_BUILD_SHARED_LIBRARY - AND UMF_POOL_SCALABLE_ENABLED - AND NOT UMF_DISABLE_HWLOC) + AND UMF_POOL_SCALABLE_ENABLED) add_umf_test( NAME init_teardown SRCS test_init_teardown.c @@ -740,7 +831,7 @@ if(LINUX ) endif() - if(EXAMPLES AND NOT UMF_DISABLE_HWLOC) + if(EXAMPLES) set(STANDALONE_CMAKE_OPTIONS "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}" ) diff --git a/test/coarse_lib.cpp b/test/coarse_lib.cpp index 41cac1128e..598df11ca2 100644 --- a/test/coarse_lib.cpp +++ b/test/coarse_lib.cpp @@ -114,7 +114,14 @@ INSTANTIATE_TEST_SUITE_P( CoarseWithMemoryStrategyTest, CoarseWithMemoryStrategyTest, ::testing::Values(UMF_COARSE_MEMORY_STRATEGY_FASTEST, UMF_COARSE_MEMORY_STRATEGY_FASTEST_BUT_ONE, - UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE)); + UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_COARSE_MEMORY_STRATEGY_FASTEST", + "UMF_COARSE_MEMORY_STRATEGY_FASTEST_BUT_ONE", + "UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE"}; 
+ return names[info.index]; + })); TEST_P(CoarseWithMemoryStrategyTest, coarseTest_basic_provider) { umf_memory_provider_handle_t malloc_memory_provider; diff --git a/test/common/CMakeLists.txt b/test/common/CMakeLists.txt index 6cffe5cfe8..da6817223b 100644 --- a/test/common/CMakeLists.txt +++ b/test/common/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2023-2024 Intel Corporation +# Copyright (C) 2023-2025 Intel Corporation # Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception @@ -20,3 +20,21 @@ add_umf_library( target_include_directories(umf_test_common PRIVATE ${UMF_CMAKE_SOURCE_DIR}/include) + +if(UMF_BUILD_LEVEL_ZERO_PROVIDER) + add_library(umf_test_mocks STATIC level_zero_mocks.cpp) + target_link_libraries(umf_test_mocks GTest::gmock umf_utils) + target_include_directories( + umf_test_mocks PUBLIC ${UMF_CMAKE_SOURCE_DIR}/include ${UMF_SRC_DIR} + ${UMF_UTILS_DIR} ${LEVEL_ZERO_INCLUDE_DIRS}) + + add_umf_library( + NAME umf_ze_loopback + TYPE SHARED + SRCS ze_loopback.cpp + LINUX_MAP_FILE ${CMAKE_CURRENT_SOURCE_DIR}/ze_loopback.map + WINDOWS_DEF_FILE ${CMAKE_CURRENT_SOURCE_DIR}/ze_loopback.def) + + target_include_directories(umf_ze_loopback + PUBLIC ${LEVEL_ZERO_INCLUDE_DIRS}) +endif() diff --git a/test/common/fork_helpers.hpp b/test/common/fork_helpers.hpp new file mode 100644 index 0000000000..3887f97034 --- /dev/null +++ b/test/common/fork_helpers.hpp @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#pragma once + +#include "umf.h" +#include + +#include +#include + +#ifndef _WIN32 +#include +#include +#include +#endif + +namespace umf_test { + +constexpr int ForkedTestSuccess = 0; +constexpr int ForkedTestFailure = 1; +constexpr int ForkedTestSkip = 77; + +template void run_in_fork(Func &&func) { +#ifndef _WIN32 + static_assert(std::is_invocable_r_v, + "run_in_fork requires a void-returning callable"); + + pid_t pid = fork(); + ASSERT_NE(pid, -1) << "fork failed"; + + if (pid == 0) { + std::forward(func)(); + + auto *unit = ::testing::UnitTest::GetInstance(); + const ::testing::TestInfo *info = + unit ? unit->current_test_info() : nullptr; + const ::testing::TestResult *result = info ? 
info->result() : nullptr; + + if (result != nullptr) { + if (result->Skipped()) { + _exit(ForkedTestSkip); + } + if (result->Failed()) { + _exit(ForkedTestFailure); + } + } + umfTearDown(); // exit not call destructor so we need to call it manually + _exit(ForkedTestSuccess); + } + + int status = 0; + ASSERT_EQ(waitpid(pid, &status, 0), pid) << "waitpid failed"; + + if (!WIFEXITED(status)) { + FAIL() << "Forked test terminated abnormally."; + } + + int exit_code = WEXITSTATUS(status); + if (exit_code == ForkedTestSkip) { + GTEST_SKIP() << "Forked test body requested skip."; + } + + ASSERT_EQ(exit_code, ForkedTestSuccess) + << "Forked test exited with code " << exit_code; +#else + (void)func; + GTEST_SKIP() << "Fork-based tests are not supported on Windows."; +#endif +} + +} // namespace umf_test diff --git a/test/common/ipc_common.c b/test/common/ipc_common.c index 5e9b911be8..879d392868 100644 --- a/test/common/ipc_common.c +++ b/test/common/ipc_common.c @@ -5,13 +5,21 @@ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */ +#ifdef _WIN32 +#define _WINSOCK_DEPRECATED_NO_WARNINGS +#include +typedef int socklen_t; +typedef SSIZE_T ssize_t; +#else #include -#include -#include -#include #include #include #include +#endif + +#include +#include +#include #include "ipc_common.h" @@ -53,25 +61,39 @@ Generally communication between the producer and the consumer looks like: */ int consumer_connect(int port) { + +#ifdef _WIN32 + WSADATA wsaData; + SOCKET producer_socket, consumer_socket; +#else + int producer_socket = -1; + int consumer_socket = -1; +#endif + struct sockaddr_in consumer_addr; struct sockaddr_in producer_addr; int producer_addr_len; - int producer_socket = -1; - int consumer_socket = -1; - int ret = -1; + +#ifdef _WIN32 + // initialize Winsock + if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) { + fprintf(stderr, "WSAStartup failed: %d\n", WSAGetLastError()); + return -1; + } +#endif // create a socket consumer_socket = socket(AF_INET, SOCK_STREAM, 0); if (consumer_socket < 0) { fprintf(stderr, "[consumer] ERROR: creating socket failed\n"); - return -1; + goto err_WSA_cleanup; } fprintf(stderr, "[consumer] Socket created\n"); // set the IP address and the port consumer_addr.sin_family = AF_INET; - consumer_addr.sin_port = htons(port); + consumer_addr.sin_port = htons((uint16_t)port); consumer_addr.sin_addr.s_addr = inet_addr(INET_ADDR); // bind to the IP address and the port @@ -101,14 +123,24 @@ int consumer_connect(int port) { } fprintf(stderr, "[consumer] Producer connected at IP %s and port %i\n", - inet_ntoa(producer_addr.sin_addr), ntohs(producer_addr.sin_port)); + inet_ntoa(producer_addr.sin_addr), + (int)ntohs(producer_addr.sin_port)); - ret = producer_socket; // success + return (int)producer_socket; // success err_close_consumer_socket: +#ifdef _WIN32 + closesocket(consumer_socket); +#else close(consumer_socket); +#endif - return ret; +err_WSA_cleanup: +#ifdef _WIN32 + WSACleanup(); +#endif + + return -1; } int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, @@ -117,7 +149,13 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, void *provider_params, memcopy_callback_t memcopy_callback, void *memcopy_ctx) { char consumer_message[MSG_SIZE]; + +#ifdef _WIN32 + SOCKET producer_socket; +#else int producer_socket = -1; +#endif + int ret = -1; umf_memory_provider_handle_t provider = NULL; umf_result_t umf_result = UMF_RESULT_ERROR_UNKNOWN; @@ -171,8 +209,8 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, IPC_handle_size); // 
send confirmation to the producer (IPC handle size) - recv_len = - send(producer_socket, &IPC_handle_size, sizeof(IPC_handle_size), 0); + recv_len = send(producer_socket, (const char *)&IPC_handle_size, + sizeof(IPC_handle_size), 0); if (recv_len < 0) { fprintf(stderr, "[consumer] ERROR: sending confirmation failed\n"); goto err_free_recv_buffer; @@ -214,8 +252,8 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, strcpy(consumer_message, "SKIP"); // send the SKIP response to the producer - send(producer_socket, consumer_message, strlen(consumer_message) + 1, - 0); + send(producer_socket, consumer_message, + (int)strlen(consumer_message) + 1, 0); goto err_free_recv_buffer; } @@ -249,8 +287,8 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, strcpy(consumer_message, CONSUMER_MSG); // send response to the producer - if (send(producer_socket, consumer_message, strlen(consumer_message) + 1, - 0) < 0) { + if (send(producer_socket, consumer_message, + (int)strlen(consumer_message) + 1, 0) < 0) { fprintf(stderr, "[consumer] ERROR: send() failed\n"); goto err_closeIPCHandle; } @@ -273,7 +311,12 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, free(recv_buffer); err_close_producer_socket: +#ifdef _WIN32 + closesocket(producer_socket); + WSACleanup(); +#else close(producer_socket); +#endif err_umfMemoryPoolDestroy: umfPoolDestroy(pool); @@ -295,20 +338,35 @@ int run_consumer(int port, const umf_memory_pool_ops_t *pool_ops, int producer_connect(int port) { struct sockaddr_in consumer_addr; + +#ifdef _WIN32 + WSADATA wsaData; + SOCKET producer_socket; +#else int producer_socket = -1; +#endif + +#ifdef _WIN32 + // initialize Winsock + if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) { + fprintf(stderr, "WSAStartup failed. Error Code: %d\n", + WSAGetLastError()); + return -1; + } +#endif // create a producer socket producer_socket = socket(AF_INET, SOCK_STREAM, 0); if (producer_socket < 0) { fprintf(stderr, "[producer] ERROR: Unable to create socket\n"); - return -1; + goto err_WSA_cleanup; } fprintf(stderr, "[producer] Socket created\n"); // set IP address and port the same as for the consumer consumer_addr.sin_family = AF_INET; - consumer_addr.sin_port = htons(port); + consumer_addr.sin_port = htons((uint16_t)port); consumer_addr.sin_addr.s_addr = inet_addr(INET_ADDR); // send connection request to the consumer @@ -321,10 +379,19 @@ int producer_connect(int port) { fprintf(stderr, "[producer] Connected to the consumer\n"); - return producer_socket; // success + return (int)producer_socket; // success err_close_producer_socket_connect: +#ifdef _WIN32 + closesocket(producer_socket); +#else close(producer_socket); +#endif + +err_WSA_cleanup: +#ifdef _WIN32 + WSACleanup(); +#endif return -1; } @@ -340,18 +407,20 @@ int run_producer(int port, const umf_memory_pool_ops_t *pool_ops, int producer_socket = -1; char consumer_message[MSG_SIZE]; +#if !defined(_WIN32) ret = prctl(PR_SET_PTRACER, getppid()); if (ret == -1) { perror("PR_SET_PTRACER may be not supported. 
prctl() call failed"); goto err_end; } +#endif // create OS memory provider umf_result = umfMemoryProviderCreate(provider_ops, provider_params, &provider); if (umf_result != UMF_RESULT_SUCCESS) { fprintf(stderr, "[producer] ERROR: creating memory provider failed\n"); - return -1; + goto err_end; } umf_memory_pool_handle_t pool; @@ -421,8 +490,8 @@ int run_producer(int port, const umf_memory_pool_ops_t *pool_ops, } // send the IPC_handle_size to the consumer - ssize_t len = - send(producer_socket, &IPC_handle_size, sizeof(IPC_handle_size), 0); + ssize_t len = send(producer_socket, (const char *)&IPC_handle_size, + sizeof(IPC_handle_size), 0); if (len < 0) { fprintf(stderr, "[producer] ERROR: unable to send the message\n"); goto err_close_producer_socket; @@ -459,7 +528,8 @@ int run_producer(int port, const umf_memory_pool_ops_t *pool_ops, } // send the IPC_handle of IPC_handle_size to the consumer - if (send(producer_socket, IPC_handle, IPC_handle_size, 0) < 0) { + if (send(producer_socket, (const char *)IPC_handle, (int)IPC_handle_size, + 0) < 0) { fprintf(stderr, "[producer] ERROR: unable to send the message\n"); goto err_close_producer_socket; } @@ -512,7 +582,12 @@ int run_producer(int port, const umf_memory_pool_ops_t *pool_ops, } err_close_producer_socket: +#ifdef _WIN32 + closesocket(producer_socket); + WSACleanup(); +#else close(producer_socket); +#endif err_PutIPCHandle: umf_result = umfPutIPCHandle(IPC_handle); diff --git a/test/common/level_zero_mocks.cpp b/test/common/level_zero_mocks.cpp new file mode 100644 index 0000000000..8d64ba8595 --- /dev/null +++ b/test/common/level_zero_mocks.cpp @@ -0,0 +1,125 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include + +#include + +#include "level_zero_mocks.h" +#include "utils_load_library.h" + +using namespace ::testing; + +umf_memory_provider_handle_t +LevelZeroMock::initializeMemoryProviderWithResidentDevices( + ze_device_handle_t device, std::vector residentDevices, + ze_context_handle_t context, ze_device_properties_t device_properties, + ze_memory_allocation_properties_t memory_allocation_properties) { + + umf_level_zero_memory_provider_params_handle_t params = nullptr; + EXPECT_EQ(umfLevelZeroMemoryProviderParamsCreate(¶ms), + UMF_RESULT_SUCCESS); + EXPECT_EQ(umfLevelZeroMemoryProviderParamsSetContext(params, context), + UMF_RESULT_SUCCESS); + EXPECT_EQ(umfLevelZeroMemoryProviderParamsSetDevice(params, device), + UMF_RESULT_SUCCESS); + EXPECT_EQ(umfLevelZeroMemoryProviderParamsSetMemoryType( + params, UMF_MEMORY_TYPE_DEVICE), + UMF_RESULT_SUCCESS); + EXPECT_EQ( + umfLevelZeroMemoryProviderParamsSetResidentDevices( + params, residentDevices.data(), (uint32_t)residentDevices.size()), + UMF_RESULT_SUCCESS); + + // query min page size operation upon provider initialization + EXPECT_CALL(*this, zeDeviceGetProperties(device, _)) + .WillRepeatedly(DoAll(SetArgPointee<1>(device_properties), + Return(ZE_RESULT_SUCCESS))); + + void *POINTER_TESTING_PROPS = TestCreatePointer(0x77); + EXPECT_CALL(*this, zeMemAllocDevice(CONTEXT, _, _, _, device, _)) + .WillOnce(DoAll(SetArgPointee<5>(POINTER_TESTING_PROPS), + Return(ZE_RESULT_SUCCESS))); + for (auto dev : residentDevices) { + EXPECT_CALL(*this, zeContextMakeMemoryResident(context, dev, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + } + EXPECT_CALL(*this, zeMemGetAllocProperties(context, _, _, _)) + 
.WillOnce(DoAll(SetArgPointee<2>(memory_allocation_properties), + Return(ZE_RESULT_SUCCESS))); + EXPECT_CALL(*this, zeMemFree(CONTEXT, POINTER_TESTING_PROPS)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + umf_memory_provider_handle_t provider = nullptr; + EXPECT_EQ(umfMemoryProviderCreate(umfLevelZeroMemoryProviderOps(), params, + &provider), + UMF_RESULT_SUCCESS); + EXPECT_NE(provider, nullptr); + + umfLevelZeroMemoryProviderParamsDestroy(params); + return provider; +} + +ze_device_properties_t TestCreateDeviceProperties() { + return ze_device_properties_t{ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES, + nullptr, + ZE_DEVICE_TYPE_GPU, + 0, + 0, + 0, + 0, + 0, + 1024, + 100, + 20, + 16, + 256, + 8, + 2, + 4, + 1, + 8, + 8, + {123}, + "TESTGPU"}; +}; + +ze_memory_allocation_properties_t +TestCreateMemoryAllocationProperties(uint32_t modifier) { + return ze_memory_allocation_properties_t{ + ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES, nullptr, + ZE_MEMORY_TYPE_DEVICE, modifier, 2048}; +} + +void MockedLevelZeroTestEnvironment::SetUp() { + const char *lib_name = getenv("UMF_ZE_LOADER_LIB_NAME"); + ASSERT_NE(lib_name, nullptr); + if (lib_name == nullptr) { + return; // to avoid nullptr deref coverity issue + } + ASSERT_NE(lib_name[0], '\0'); + + lib_handle = utils_open_library(lib_name, 0); + ASSERT_NE(lib_handle, nullptr); + + void *l0interface_sym = + utils_get_symbol_addr(lib_handle, "level_zero_mock", lib_name); + ASSERT_NE(l0interface_sym, nullptr); + + l0interface = static_cast(l0interface_sym); + ASSERT_NE(l0interface, nullptr); + + ASSERT_EQ(*l0interface, nullptr); +} + +void MockedLevelZeroTestEnvironment::TearDown() { + utils_close_library(lib_handle); +} + +LevelZero **MockedLevelZeroTestEnvironment::l0interface; diff --git a/test/common/level_zero_mocks.h b/test/common/level_zero_mocks.h new file mode 100644 index 0000000000..35a5e2b7ab --- /dev/null +++ b/test/common/level_zero_mocks.h @@ -0,0 +1,96 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#ifndef UMF_TEST_PROVIDER_LEVEL_ZERO_MOCKS_H +#define UMF_TEST_PROVIDER_LEVEL_ZERO_MOCKS_H + +#include +#include + +#include "utils_log.h" +#include "ze_loopback.h" + +// TEST CREATE methods for objects + +template constexpr T TestCreatePointer(uintptr_t modifier = 0) { + return reinterpret_cast(static_cast(0x1000) + modifier); +} + +ze_device_properties_t TestCreateDeviceProperties(); + +ze_memory_allocation_properties_t +TestCreateMemoryAllocationProperties(uint32_t modifier = 0); + +// already created common instances for tests writing convenience + +static const auto DEVICE_0 = TestCreatePointer(0); +static const auto DEVICE_1 = TestCreatePointer(1); +static const auto DEVICE_2 = TestCreatePointer(2); +static const auto DEVICE_3 = TestCreatePointer(3); +static const auto DEVICE_4 = TestCreatePointer(4); +static const auto DEVICE_5 = TestCreatePointer(5); + +static const auto CONTEXT = TestCreatePointer(); +static const auto DEVICE_PROPS = TestCreateDeviceProperties(); +static const auto MEM_PROPS = TestCreateMemoryAllocationProperties(); + +static void *POINTER_0 = TestCreatePointer(0x90); +static void *POINTER_1 = TestCreatePointer(0x91); +static void *POINTER_2 = TestCreatePointer(0x92); +static void *POINTER_3 = TestCreatePointer(0x93); +static void *POINTER_4 = TestCreatePointer(0x94); + +class LevelZeroMock : public LevelZero { + public: + MOCK_METHOD3(zeContextCreate, + ze_result_t(ze_driver_handle_t, const ze_context_desc_t *, + ze_context_handle_t *)); + MOCK_METHOD2(zeDeviceGetProperties, + ze_result_t(ze_device_handle_t, ze_device_properties_t *)); + MOCK_METHOD6(zeMemAllocDevice, + ze_result_t(ze_context_handle_t, + const ze_device_mem_alloc_desc_t *, size_t, size_t, + ze_device_handle_t, void **)); + MOCK_METHOD4(zeMemGetAllocProperties, + ze_result_t(ze_context_handle_t, const void *, + ze_memory_allocation_properties_t *, + ze_device_handle_t *)); + MOCK_METHOD4(zeContextMakeMemoryResident, + ze_result_t(ze_context_handle_t, ze_device_handle_t, void *, + size_t)); + MOCK_METHOD4(zeContextEvictMemory, + ze_result_t(ze_context_handle_t, ze_device_handle_t, void *, + size_t)); + MOCK_METHOD2(zeMemFree, + ze_result_t(ze_context_handle_t hContext, void *ptr)); + + // A helper function that (1) sets all EXPECT_CALLs related to successful l0 provider creation + // and initialization (2) calls l0 provider creation and initialization + umf_memory_provider_handle_t initializeMemoryProviderWithResidentDevices( + ze_device_handle_t device, + std::vector residentDevices, + ze_context_handle_t context = CONTEXT, + ze_device_properties_t device_properties = DEVICE_PROPS, + ze_memory_allocation_properties_t memory_allocation_properties = + MEM_PROPS); +}; + +// important, makes UMF load ze_loopback instead of regular l0 +class MockedLevelZeroTestEnvironment : public ::testing::Environment { + + void *lib_handle; + + public: + static LevelZero **l0interface; + + void SetUp() override; + void TearDown() override; +}; + +#endif //UMF_TEST_PROVIDER_LEVEL_ZERO_MOCKS_H diff --git a/test/common/pool.hpp b/test/common/pool.hpp index 5cae85411e..30abd08cad 100644 --- a/test/common/pool.hpp +++ b/test/common/pool.hpp @@ -25,6 +25,38 @@ #include "provider.hpp" #include "utils/cpp_helpers.hpp" +typedef void *(*pfnPoolParamsCreate)(); +typedef umf_result_t (*pfnPoolParamsDestroy)(void *); + +typedef void *(*pfnProviderParamsCreate)(); +typedef umf_result_t (*pfnProviderParamsDestroy)(void *); + +using poolCreateExtParams = + 
std::tuple; + +std::string poolCreateExtParamsNameGen( + const testing::TestParamInfo &info) { + + const umf_memory_pool_ops_t *pool_ops = std::get<0>(info.param); + const umf_memory_provider_ops_t *provider_ops = std::get<3>(info.param); + + const char *poolName = NULL; + pool_ops->get_name(NULL, &poolName); + + const char *providerName = NULL; + provider_ops->get_name(NULL, &providerName); + + // if there are multiple cases with the same pool and provider combination, + // add the index to the name + std::string poolParams = std::get<1>(info.param) + ? "_w_params_" + std::to_string(info.index) + : ""; + + return std::string(poolName) + poolParams + "_" + providerName; +} + namespace umf_test { umf_memory_pool_handle_t @@ -108,25 +140,26 @@ typedef struct pool_base_t { umf_result_t initialize(umf_memory_provider_handle_t) noexcept { return UMF_RESULT_SUCCESS; }; - void *malloc([[maybe_unused]] size_t size) noexcept { return nullptr; } + void *malloc(size_t) noexcept { return nullptr; } void *calloc(size_t, size_t) noexcept { return nullptr; } void *realloc(void *, size_t) noexcept { return nullptr; } void *aligned_malloc(size_t, size_t) noexcept { return nullptr; } - umf_result_t malloc_usable_size(const void *, size_t *size) noexcept { - if (size) { - *size = 0; - } - return UMF_RESULT_SUCCESS; + umf_result_t malloc_usable_size(const void *, size_t *) noexcept { + return UMF_RESULT_ERROR_UNKNOWN; } - umf_result_t free(void *) noexcept { return UMF_RESULT_SUCCESS; } + umf_result_t free(void *) noexcept { return UMF_RESULT_ERROR_UNKNOWN; } umf_result_t get_last_allocation_error() noexcept { - return UMF_RESULT_SUCCESS; + return UMF_RESULT_ERROR_UNKNOWN; } - umf_result_t get_name(const char **name) noexcept { - if (name) { - *name = "pool_base"; - } - return UMF_RESULT_SUCCESS; + umf_result_t get_name(const char **) noexcept { + return UMF_RESULT_ERROR_UNKNOWN; + } + umf_result_t ext_ctl(umf_ctl_query_source_t, const char *, void *, size_t, + umf_ctl_query_type_t, va_list) noexcept { + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } + umf_result_t ext_trim_memory(size_t) noexcept { + return UMF_RESULT_ERROR_UNKNOWN; } } pool_base_t; @@ -177,6 +210,11 @@ struct malloc_pool : public pool_base_t { } return UMF_RESULT_SUCCESS; } + + umf_result_t ext_trim_memory(size_t) noexcept { + // malloc_pool frees all memory immediately, so we have nothing to trim + return UMF_RESULT_SUCCESS; + } }; umf_memory_pool_ops_t MALLOC_POOL_OPS = diff --git a/test/common/pool_trace.c b/test/common/pool_trace.c index ce944479f0..c05a16d325 100644 --- a/test/common/pool_trace.c +++ b/test/common/pool_trace.c @@ -99,6 +99,14 @@ static umf_result_t traceGetName(void *pool, const char **name) { return UMF_RESULT_SUCCESS; } +static umf_result_t traceTrimMemory(void *pool, size_t minBytesToKeep) { + trace_pool_t *trace_pool = (trace_pool_t *)pool; + + trace_pool->params.trace_handler(trace_pool->params.trace_context, + "trim_memory"); + return umfPoolTrimMemory(trace_pool->params.hUpstreamPool, minBytesToKeep); +} + umf_memory_pool_ops_t UMF_TRACE_POOL_OPS = { .version = UMF_POOL_OPS_VERSION_CURRENT, .initialize = traceInitialize, @@ -111,4 +119,5 @@ umf_memory_pool_ops_t UMF_TRACE_POOL_OPS = { .free = traceFree, .get_last_allocation_error = traceGetLastStatus, .get_name = traceGetName, + .ext_trim_memory = traceTrimMemory, }; diff --git a/test/common/provider.hpp b/test/common/provider.hpp index e52dd614a6..947392559c 100644 --- a/test/common/provider.hpp +++ b/test/common/provider.hpp @@ -18,6 +18,36 @@ #include 
"test_helpers.h" #include "utils/cpp_helpers.hpp" +typedef void *(*pfnProviderParamsCreate)(); +typedef umf_result_t (*pfnProviderParamsDestroy)(void *); + +using providerCreateExtParams = + std::tuple; + +std::string providerCreateExtParamsNameGen( + const testing::TestParamInfo param) { + const umf_memory_provider_ops_t *provider_ops = std::get<0>(param.param); + + const char *providerName = NULL; + provider_ops->get_name(NULL, &providerName); + + return providerName; +} + +void providerCreateExt(providerCreateExtParams params, + umf_test::provider_unique_handle_t *handle) { + umf_memory_provider_handle_t hProvider = nullptr; + auto [provider_ops, provider_params] = params; + + auto ret = + umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_NE(hProvider, nullptr); + + *handle = umf_test::provider_unique_handle_t(hProvider, + &umfMemoryProviderDestroy); +} + namespace umf_test { umf_memory_provider_handle_t @@ -100,6 +130,29 @@ typedef struct provider_base_t { [[maybe_unused]] size_t size) noexcept { return UMF_RESULT_ERROR_NOT_SUPPORTED; } + + umf_result_t ext_ctl([[maybe_unused]] umf_ctl_query_source_t source, + [[maybe_unused]] const char *name, + [[maybe_unused]] void *arg, + [[maybe_unused]] size_t size, + [[maybe_unused]] umf_ctl_query_type_t queryType, + [[maybe_unused]] va_list args) noexcept { + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } + + umf_result_t ext_get_allocation_properties( + [[maybe_unused]] const void *ptr, + [[maybe_unused]] umf_memory_property_id_t memory_property_id, + [[maybe_unused]] void *value) noexcept { + return UMF_RESULT_ERROR_UNKNOWN; + } + + umf_result_t ext_get_allocation_properties_size( + [[maybe_unused]] umf_memory_property_id_t memory_property_id, + [[maybe_unused]] size_t *size) noexcept { + return UMF_RESULT_ERROR_UNKNOWN; + } + virtual ~provider_base_t() = default; } provider_base_t; diff --git a/test/common/provider_null.c b/test/common/provider_null.c index 380cba47d3..2ce8c78ddb 100644 --- a/test/common/provider_null.c +++ b/test/common/provider_null.c @@ -5,9 +5,11 @@ #include #include -#include "provider_null.h" #include +#include "provider_null.h" +#include "utils_common.h" + static umf_result_t nullInitialize(const void *params, void **pool) { (void)params; *pool = NULL; @@ -22,9 +24,18 @@ static umf_result_t nullFinalize(void *pool) { static umf_result_t nullAlloc(void *provider, size_t size, size_t alignment, void **ptr) { (void)provider; - (void)size; - (void)alignment; - *ptr = NULL; + + if (ptr == NULL) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + + if (size == 0) { + *ptr = NULL; + return UMF_RESULT_SUCCESS; + } + + *ptr = (void *)ALIGN_UP_SAFE(0xDEADBEAF, alignment); // any not-NULL value + return UMF_RESULT_SUCCESS; } @@ -133,6 +144,24 @@ static umf_result_t nullCloseIpcHandle(void *provider, void *ptr, size_t size) { return UMF_RESULT_SUCCESS; } +static umf_result_t +nullGetAllocationProperties(void *provider, const void *ptr, + umf_memory_property_id_t propertyId, void *value) { + (void)provider; + (void)ptr; + (void)propertyId; + (void)value; + return UMF_RESULT_SUCCESS; +} + +static umf_result_t nullGetAllocationPropertiesSize( + void *provider, umf_memory_property_id_t propertyId, size_t *size) { + (void)provider; + (void)propertyId; + (void)size; + return UMF_RESULT_SUCCESS; +} + umf_memory_provider_ops_t UMF_NULL_PROVIDER_OPS = { .version = UMF_PROVIDER_OPS_VERSION_CURRENT, .initialize = nullInitialize, @@ -152,4 +181,6 @@ umf_memory_provider_ops_t 
UMF_NULL_PROVIDER_OPS = { .ext_put_ipc_handle = nullPutIpcHandle, .ext_open_ipc_handle = nullOpenIpcHandle, .ext_close_ipc_handle = nullCloseIpcHandle, + .ext_get_allocation_properties = nullGetAllocationProperties, + .ext_get_allocation_properties_size = nullGetAllocationPropertiesSize, }; diff --git a/test/common/provider_trace.c b/test/common/provider_trace.c index adb8083369..c780197c95 100644 --- a/test/common/provider_trace.c +++ b/test/common/provider_trace.c @@ -5,10 +5,12 @@ #include #include -#include "provider_trace.h" #include #include +#include "memory_properties_internal.h" +#include "provider_trace.h" + static umf_result_t traceInitialize(const void *params, void **pool) { umf_provider_trace_params_t *trace_pool = (umf_provider_trace_params_t *)malloc( @@ -98,6 +100,11 @@ static umf_result_t traceName(void *provider, const char **name) { umf_provider_trace_params_t *traceProvider = (umf_provider_trace_params_t *)provider; + // if ops->get_name is called with null provider it must return default provider name + if (!provider) { + *name = "trace"; + return UMF_RESULT_SUCCESS; + } traceProvider->trace_handler(traceProvider->trace_context, "name"); return umfMemoryProviderGetName(traceProvider->hUpstreamProvider, name); } @@ -214,4 +221,6 @@ umf_memory_provider_ops_t UMF_TRACE_PROVIDER_OPS = { .ext_put_ipc_handle = tracePutIpcHandle, .ext_open_ipc_handle = traceOpenIpcHandle, .ext_close_ipc_handle = traceCloseIpcHandle, + .ext_ctl = NULL, + .ext_get_allocation_properties = NULL, }; diff --git a/test/common/ze_loopback.cpp b/test/common/ze_loopback.cpp new file mode 100644 index 0000000000..d43219a3c5 --- /dev/null +++ b/test/common/ze_loopback.cpp @@ -0,0 +1,280 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include +#include + +#include "ze_loopback.h" + +ZE_APIEXPORT LevelZero *level_zero_mock = nullptr; + +static void check_mock_present() { + if (level_zero_mock == nullptr) { + std::cerr << "level_zero_mock was not set\n"; + abort(); + } +} + +#define FAIL_NOT_IMPLEMENTED \ + std::cerr << __func__ << " not implemented in ze_loopback.cpp\n"; \ + abort(); + +// +// libze_ops from src/utils/utils_level_zero.cpp +// + +ZE_APIEXPORT ze_result_t ZE_APICALL zeInit(ze_init_flags_t flags) { + (void)flags; + return ZE_RESULT_SUCCESS; +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeDriverGet(uint32_t *pCount, + ze_driver_handle_t *phDrivers) { + (void)phDrivers; + (void)pCount; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeDeviceGet(ze_driver_handle_t hDriver, + uint32_t *pCount, + ze_device_handle_t *phDevices) { + (void)hDriver; + (void)pCount; + (void)phDevices; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeDeviceGetProperties( + ze_device_handle_t hDevice, ze_device_properties_t *pDeviceProperties) { + check_mock_present(); + return level_zero_mock->zeDeviceGetProperties(hDevice, pDeviceProperties); +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeContextCreate(ze_driver_handle_t hDriver, const ze_context_desc_t *desc, + ze_context_handle_t *phContext) { + (void)hDriver; + (void)desc; + (void)phContext; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeContextDestroy(ze_context_handle_t hContext) { + (void)hContext; + FAIL_NOT_IMPLEMENTED; +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeCommandQueueCreate(ze_context_handle_t hContext, ze_device_handle_t hDevice, + const ze_command_queue_desc_t *desc, + ze_command_queue_handle_t *phCommandQueue) { + (void)hContext; + (void)hDevice; + (void)desc; + (void)phCommandQueue; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeCommandQueueDestroy(ze_command_queue_handle_t hCommandQueue) { + (void)hCommandQueue; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeCommandQueueExecuteCommandLists( + ze_command_queue_handle_t hCommandQueue, uint32_t numCommandLists, + ze_command_list_handle_t *phCommandLists, ze_fence_handle_t hFence) { + (void)hCommandQueue; + (void)numCommandLists; + (void)phCommandLists; + (void)hFence; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeCommandQueueSynchronize( + ze_command_queue_handle_t hCommandQueue, uint64_t timeout) { + (void)hCommandQueue; + (void)timeout; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeCommandListCreate(ze_context_handle_t hContext, ze_device_handle_t hDevice, + const ze_command_list_desc_t *desc, + ze_command_list_handle_t *phCommandList) { + (void)hContext; + (void)hDevice; + (void)desc; + (void)phCommandList; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeCommandListDestroy(ze_command_list_handle_t hCommandList) { + (void)hCommandList; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeCommandListClose(ze_command_list_handle_t hCommandList) { + (void)hCommandList; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeCommandListAppendMemoryCopy( + ze_command_list_handle_t hCommandList, void *dstptr, const void *srcptr, + size_t size, ze_event_handle_t hSignalEvent, uint32_t numWaitEvents, + ze_event_handle_t *phWaitEvents) { + (void)hCommandList; + (void)dstptr; + (void)srcptr; + (void)size; + (void)hSignalEvent; + (void)numWaitEvents; + (void)phWaitEvents; + 
FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeCommandListAppendMemoryFill( + ze_command_list_handle_t hCommandList, void *ptr, const void *pattern, + size_t pattern_size, size_t size, ze_event_handle_t hSignalEvent, + uint32_t numWaitEvents, ze_event_handle_t *phWaitEvents) { + (void)hCommandList; + (void)ptr; + (void)pattern; + (void)pattern_size; + (void)size; + (void)hSignalEvent; + (void)numWaitEvents; + (void)phWaitEvents; + FAIL_NOT_IMPLEMENTED +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeMemGetAllocProperties(ze_context_handle_t hContext, const void *ptr, + ze_memory_allocation_properties_t *pMemAllocProperties, + ze_device_handle_t *phDevice) { + check_mock_present(); + return level_zero_mock->zeMemGetAllocProperties( + hContext, ptr, pMemAllocProperties, phDevice); +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeMemAllocDevice( + ze_context_handle_t hContext, const ze_device_mem_alloc_desc_t *device_desc, + size_t size, size_t alignment, ze_device_handle_t hDevice, void **pptr) { + check_mock_present(); + return level_zero_mock->zeMemAllocDevice(hContext, device_desc, size, + alignment, hDevice, pptr); +} + +ZE_APIEXPORT ze_result_t ZE_APICALL zeMemFree(ze_context_handle_t hContext, + void *ptr) { + check_mock_present(); + return level_zero_mock->zeMemFree(hContext, ptr); +} + +ZE_APIEXPORT ze_result_t ZE_APICALL +zeDeviceGetMemoryProperties(ze_device_handle_t hDevice, uint32_t *pCount, + ze_device_memory_properties_t *pMemProperties) { + (void)hDevice; + (void)pCount; + (void)pMemProperties; + FAIL_NOT_IMPLEMENTED +} + +// +// ze_ops_t operations from src/provider/provider_level_zero.c +// + +ze_result_t ZE_APICALL zeMemAllocHost(ze_context_handle_t hContext, + const ze_host_mem_alloc_desc_t *host_desc, + size_t size, size_t alignment, + void **pptr) { + (void)hContext; + (void)host_desc; + (void)size; + (void)alignment; + (void)pptr; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeMemAllocShared( + ze_context_handle_t hContext, const ze_device_mem_alloc_desc_t *device_desc, + const ze_host_mem_alloc_desc_t *host_desc, size_t size, size_t alignment, + ze_device_handle_t hDevice, void **pptr) { + (void)hContext; + (void)device_desc; + (void)host_desc; + (void)size; + (void)alignment; + (void)hDevice; + (void)pptr; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeMemGetIpcHandle(ze_context_handle_t hContext, + const void *ptr, + ze_ipc_mem_handle_t *pIpcHandle) { + (void)hContext; + (void)ptr; + (void)pIpcHandle; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeMemPutIpcHandle(ze_context_handle_t hContext, + ze_ipc_mem_handle_t handle) { + (void)hContext; + (void)handle; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeMemOpenIpcHandle(ze_context_handle_t hContext, + ze_device_handle_t hDevice, + ze_ipc_mem_handle_t handle, + ze_ipc_memory_flags_t flags, + void **pptr) { + (void)hContext; + (void)hDevice; + (void)handle; + (void)flags; + (void)pptr; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeMemCloseIpcHandle(ze_context_handle_t hContext, + const void *ptr) { + (void)hContext; + (void)ptr; + FAIL_NOT_IMPLEMENTED +} + +ze_result_t ZE_APICALL zeContextMakeMemoryResident(ze_context_handle_t hContext, + ze_device_handle_t hDevice, + void *ptr, size_t size) { + check_mock_present(); + return level_zero_mock->zeContextMakeMemoryResident(hContext, hDevice, ptr, + size); +} + +ze_result_t ZE_APICALL zeContextEvictMemory(ze_context_handle_t hContext, + ze_device_handle_t hDevice, + void *ptr, size_t size) { + check_mock_present(); + return 
level_zero_mock->zeContextEvictMemory(hContext, hDevice, ptr, size); +} + +ze_result_t ZE_APICALL +zeMemFreeExt(ze_context_handle_t hContext, + const ze_memory_free_ext_desc_t *pMemFreeDesc, void *ptr) { + (void)hContext; + (void)pMemFreeDesc; + (void)ptr; + FAIL_NOT_IMPLEMENTED +} diff --git a/test/common/ze_loopback.def b/test/common/ze_loopback.def new file mode 100644 index 0000000000..2dc73ab317 --- /dev/null +++ b/test/common/ze_loopback.def @@ -0,0 +1,38 @@ +;;;; Begin Copyright Notice +; Copyright (C) 2025 Intel Corporation +; Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +;;;; End Copyright Notice + +LIBRARY UMF_ZE_LOOPBACK + +EXPORTS + level_zero_mock + zeInit + zeDriverGet + zeDeviceGet + zeDeviceGetProperties + zeContextCreate + zeContextDestroy + zeCommandQueueCreate + zeCommandQueueDestroy + zeCommandQueueExecuteCommandLists + zeCommandQueueSynchronize + zeCommandListCreate + zeCommandListDestroy + zeCommandListClose + zeCommandListAppendMemoryCopy + zeCommandListAppendMemoryFill + zeContextMakeMemoryResident + zeContextEvictMemory + zeMemGetAllocProperties + zeMemAllocDevice + zeMemAllocHost + zeMemAllocShared + zeMemFree + zeMemFreeExt + zeMemGetIpcHandle + zeMemPutIpcHandle + zeMemOpenIpcHandle + zeMemCloseIpcHandle + zeDeviceGetMemoryProperties diff --git a/test/common/ze_loopback.h b/test/common/ze_loopback.h new file mode 100644 index 0000000000..bbf95f141f --- /dev/null +++ b/test/common/ze_loopback.h @@ -0,0 +1,36 @@ +// Copyright (C) 2025 Intel Corporation +// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#ifndef UMF_TEST_ZE_LOOPBACK_H +#define UMF_TEST_ZE_LOOPBACK_H + +#include "ze_api.h" + +class LevelZero { + public: + virtual ~LevelZero() = default; + + virtual ze_result_t zeContextCreate(ze_driver_handle_t, + const ze_context_desc_t *, + ze_context_handle_t *) = 0; + virtual ze_result_t zeDeviceGetProperties(ze_device_handle_t, + ze_device_properties_t *) = 0; + virtual ze_result_t zeMemAllocDevice(ze_context_handle_t, + const ze_device_mem_alloc_desc_t *, + size_t, size_t, ze_device_handle_t, + void **) = 0; + virtual ze_result_t + zeMemGetAllocProperties(ze_context_handle_t, const void *, + ze_memory_allocation_properties_t *, + ze_device_handle_t *) = 0; + virtual ze_result_t zeContextMakeMemoryResident(ze_context_handle_t, + ze_device_handle_t, void *, + size_t) = 0; + virtual ze_result_t zeContextEvictMemory(ze_context_handle_t, + ze_device_handle_t, void *, + size_t) = 0; + virtual ze_result_t zeMemFree(ze_context_handle_t hContext, void *ptr) = 0; +}; + +#endif //UMF_TEST_ZE_LOOPBACK_H diff --git a/test/common/ze_loopback.map b/test/common/ze_loopback.map new file mode 100644 index 0000000000..35c6ac9093 --- /dev/null +++ b/test/common/ze_loopback.map @@ -0,0 +1,40 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +# These functions are meant to be in unnamed scope. They are also not named +# with any umf prefix, as they should override functions with the same name. 
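+# A GNU ld version script: symbols listed under `global:` below are exported
+# from the loopback library, while `local: *` hides every other symbol, so
+# only these ze* entry points (plus level_zero_mock) are visible to callers
+# and can stand in for the identically named Level Zero functions in tests.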
+{ + global: + level_zero_mock; + zeInit; + zeDriverGet; + zeDeviceGet; + zeDeviceGetProperties; + zeContextCreate; + zeContextDestroy; + zeCommandQueueCreate; + zeCommandQueueDestroy; + zeCommandQueueExecuteCommandLists; + zeCommandQueueSynchronize; + zeCommandListCreate; + zeCommandListDestroy; + zeCommandListClose; + zeCommandListAppendMemoryCopy; + zeCommandListAppendMemoryFill; + zeContextMakeMemoryResident; + zeContextEvictMemory; + zeMemGetAllocProperties; + zeMemAllocDevice; + zeMemAllocHost; + zeMemAllocShared; + zeMemFree; + zeMemFreeExt; + zeMemGetIpcHandle; + zeMemPutIpcHandle; + zeMemOpenIpcHandle; + zeMemCloseIpcHandle; + zeDeviceGetMemoryProperties; + local: + *; +}; diff --git a/test/ctl/ctl_api.cpp b/test/ctl/ctl_api.cpp index 55120961b1..47280d3acf 100644 --- a/test/ctl/ctl_api.cpp +++ b/test/ctl/ctl_api.cpp @@ -9,12 +9,8 @@ #include #include -#include -#include // For std::ref -#include #include #include -#include #include #include @@ -23,11 +19,11 @@ #include #include #include -#include -#include #include #include "../common/base.hpp" +#include "../common/fork_helpers.hpp" +#include "../common/provider.hpp" #include "gtest/gtest.h" using namespace umf_test; @@ -55,124 +51,6 @@ TEST_F(test, ctl_by_handle_os_provider) { umfMemoryProviderDestroy(hProvider); } -class Pool { - public: - Pool() : provider(NULL), pool(NULL) {} - - int instantiatePool(const umf_memory_pool_ops_t *pool_ops, - const void *pool_params, - umf_pool_create_flags_t flags = 0) { - freeResources(); - provider = create_memory_provider(); - if (provider == NULL) { - return -1; // Provider not supported - } - int ret = umfPoolCreate(pool_ops, provider, pool_params, flags, &pool); - if (ret != UMF_RESULT_SUCCESS) { - umfMemoryProviderDestroy(provider); - provider = NULL; - return -2; // Failed to create memory pool - } - return 0; // Success - } - - // Template specialization for different types of reference value - template T getReferenceValue() { - if constexpr (std::is_arithmetic_v) { - return 0xBAD; - } else if constexpr (std::is_same_v) { - return "0xBAD"; - } - } - - template - void validateQuery(umf_result_t (*ctlApiFunction)(const char *name, - void *arg, size_t, ...), - const char *name, T expectedValue, - umf_result_t expected) { - T value = getReferenceValue(); - umf_result_t ret; - char ret_buf[256] = {0}; - if constexpr (std::is_same_v) { - strncpy(ret_buf, value.c_str(), sizeof(ret_buf) - 1); - ret_buf[sizeof(ret_buf) - 1] = '\0'; // Ensure null-termination - ret = ctlApiFunction(name, (void *)ret_buf, sizeof(ret_buf), pool); - } else if constexpr (std::is_arithmetic_v) { - std::string value_str = std::to_string(value); - strncpy(ret_buf, value_str.c_str(), sizeof(ret_buf) - 1); - ret_buf[sizeof(ret_buf) - 1] = '\0'; // Ensure null-termination - ret = ctlApiFunction(name, (void *)ret_buf, sizeof(ret_buf), pool); - } else { - ret = ctlApiFunction(name, &value, sizeof(value), pool); - } - - ASSERT_EQ(ret, expected); - if (ret == UMF_RESULT_SUCCESS) { - ASSERT_EQ(ret_buf, expectedValue); - } - } - - template - void executeQuery(umf_result_t (*ctlApiFunction)(const char *name, - void *arg, size_t, ...), - const char *name, T value) { - size_t value_len; - if constexpr (std::is_arithmetic_v) { - value_len = sizeof(value); - } else if constexpr (std::is_same_v) { - value_len = strlen(value.c_str()); - } else if constexpr (std::is_same_v) { - value_len = strlen(value); - } else { - throw std::runtime_error("Unsupported type for value"); - } - umf_result_t ret = ctlApiFunction(name, (void *)value, 
value_len); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - } - - void freeResources() { - if (pool) { - umfPoolDestroy(pool); - pool = NULL; - } - if (provider) { - umfMemoryProviderDestroy(provider); - provider = NULL; - } - if (data) { - free(data); - data = nullptr; - } - } - - umf_memory_provider_handle_t provider; - umf_memory_pool_handle_t pool; - void *data = nullptr; - - private: - // Create a memory provider - umf_memory_provider_handle_t create_memory_provider() { - const umf_memory_provider_ops_t *provider_ops = - umfFixedMemoryProviderOps(); - umf_fixed_memory_provider_params_handle_t params = NULL; - - data = malloc(1024 * 1024); - int ret = - umfFixedMemoryProviderParamsCreate(data, 1024 * 1024, ¶ms); - if (ret != UMF_RESULT_SUCCESS) { - return 0; - } - - ret = umfMemoryProviderCreate(provider_ops, params, &provider); - umfFixedMemoryProviderParamsDestroy(params); - if (ret != UMF_RESULT_SUCCESS) { - return 0; - } - - return provider; - } -}; - class CtlTest : public ::testing::Test { public: CtlTest() {} @@ -184,20 +62,21 @@ class CtlTest : public ::testing::Test { private: }; -/* Case: default settings - * This test sets a default value and then retrieves it */ +// setting default modifies global state - +// tests doing so should run in fork to ensure correct test isolation TEST_F(CtlTest, ctlDefault) { - const char *arg = "default_name"; - - auto res = umfCtlSet("umf.pool.default.some_pool.some_path", (void *)arg, - strlen(arg)); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - - char output[64] = {1}; - res = umfCtlGet("umf.pool.default.some_pool.some_path", (void *)output, - sizeof(output)); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); - ASSERT_STREQ(output, arg); + umf_test::run_in_fork([] { + const char *arg = "default_name"; + ASSERT_EQ(umfCtlSet("umf.pool.default.some_pool.some_path", (void *)arg, + strlen(arg)), + UMF_RESULT_SUCCESS); + + char output[64] = {1}; + ASSERT_EQ(umfCtlGet("umf.pool.default.some_pool.some_path", + (void *)output, sizeof(output)), + UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, arg); + }); } /* Case: umfCtlSet negative test */ @@ -234,147 +113,447 @@ TEST_F(CtlTest, ctlGetInvalid) { /* Case: multi-threaded test for pool defaults * This test sets a default value in multiple threads and then retrieves it */ TEST_F(CtlTest, ctlDefaultPoolMultithreaded) { - const size_t max_size = 10; - const size_t num_threads = 8; - std::vector threads; - std::atomic totalRecords = 0; - const char *predefined_value = "xyzzyx"; - std::string name_prefix = "umf.pool.default.some_pool."; - for (size_t i = 0; i < num_threads; i++) { - threads.emplace_back([i, &totalRecords, &predefined_value, &name_prefix, - max_size = max_size]() { - for (size_t j = 0; j < max_size; j++) { - std::string name = name_prefix + std::to_string(i * 10 + j); - umfCtlSet(name.c_str(), (void *)predefined_value, - strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1UL); - } - }); - } - for (auto &thread : threads) { - thread.join(); - } + umf_test::run_in_fork([] { + const size_t max_size = 10; + const size_t num_threads = 8; + std::vector threads; + std::atomic totalRecords = 0; + const char *predefined_value = "xyzzyx"; + std::string name_prefix = "umf.pool.default.some_pool."; + for (size_t i = 0; i < num_threads; i++) { + threads.emplace_back([i, &totalRecords, &predefined_value, + &name_prefix, max_size = max_size]() { + for (size_t j = 0; j < max_size; j++) { + std::string name = name_prefix + std::to_string(i * 10 + j); + umfCtlSet(name.c_str(), (void *)predefined_value, + 
strlen(predefined_value)); + std::atomic_fetch_add(&totalRecords, 1UL); + } + }); + } + for (auto &thread : threads) { + thread.join(); + } - // Check if all threads set the value correctly - // and retrieve it - ASSERT_EQ(totalRecords.load(), num_threads * max_size); + ASSERT_EQ(totalRecords.load(), num_threads * max_size); - char output[100] = {0}; - for (size_t i = 0; i < totalRecords.load(); i++) { - std::string name = name_prefix + std::to_string(i); - auto status = umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), std::string(predefined_value)); - } + char output[100] = {0}; + for (size_t i = 0; i < totalRecords.load(); i++) { + std::string name = name_prefix + std::to_string(i); + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, predefined_value); + } + }); } -/* Case: overwriting an existing value for pool defaults - * This test sets a default value and then overwrites it with a new value */ -TEST_F(CtlTest, ctlDefaultPoolOverwrite) { - constexpr int max_size = 10; - std::vector values; - const std::string name = "umf.pool.default.some_pool"; - - for (int i = 0; i < max_size; i++) { - values.push_back("value_" + std::to_string(i)); - umfCtlSet(name.c_str(), (void *)values.back().c_str(), - values.back().size()); - } +struct ctl_provider_params { + const char *name; + int initial_value; +}; - char output[100] = {0}; - umf_result_t status = - umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), values.back()); -} +class ctl_provider : public umf_test::provider_base_t { + public: + ctl_provider() : name_ptr_(kDefaultName), stored_value_(0) {} -TEST_F(CtlTest, DISABLED_ctlNameValidation) { - std::string name = "umf.pool.default.disjoint.name"; - std::string value = "new_disjoint_pool_name"; - umf_disjoint_pool_params_handle_t params = NULL; + umf_result_t initialize(const ctl_provider_params *params) noexcept { + if (!params) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } - Pool p; - try { - p.executeQuery(umfCtlSet, name.c_str(), value.c_str()); - umf_result_t res = umfDisjointPoolParamsCreate(¶ms); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); + stored_value_ = params->initial_value; + if (params->name) { + name_storage_ = params->name; + name_ptr_ = name_storage_.c_str(); + } else { + name_ptr_ = kDefaultName; + } - auto ret = p.instantiatePool(umfDisjointPoolOps(), params); - ASSERT_EQ(ret, 0); + return UMF_RESULT_SUCCESS; + } - p.validateQuery(umfCtlGet, "umf.pool.by_handle.{}.disjoint.name", - std::move(value), UMF_RESULT_SUCCESS); - } catch (...) 
{ - GTEST_FAIL() << "Unknown exception!"; + umf_result_t get_name(const char **name) noexcept { + if (!name) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } + *name = name_ptr_; + return UMF_RESULT_SUCCESS; } - umfDisjointPoolParamsDestroy(params); - p.freeResources(); -} -TEST_F(CtlTest, DISABLED_ctlSizeValidation) { - std::string name = "umf.pool.default.disjoint.name"; - std::string value = "1234567890"; - umf_disjoint_pool_params_handle_t params = NULL; + umf_result_t ext_ctl(umf_ctl_query_source_t, const char *path, void *arg, + size_t size, umf_ctl_query_type_t queryType, + va_list) noexcept { + if (std::strcmp(path, "params.value") != 0) { + return UMF_RESULT_ERROR_INVALID_CTL_PATH; + } - Pool p; - try { - p.executeQuery(umfCtlSet, name.c_str(), value.c_str()); - umf_result_t res = umfDisjointPoolParamsCreate(¶ms); - ASSERT_EQ(res, UMF_RESULT_SUCCESS); + if (!arg || size != sizeof(int)) { + return UMF_RESULT_ERROR_INVALID_ARGUMENT; + } - auto ret = p.instantiatePool(umfDisjointPoolOps(), params); - ASSERT_EQ(ret, 0); + if (queryType == CTL_QUERY_WRITE) { + stored_value_ = *static_cast(arg); + return UMF_RESULT_SUCCESS; + } - char output[100] = {0}; - umfCtlGet("umf.pool.default.disjoint.name", output, sizeof(output)); - ASSERT_EQ(std::string(output), value); - - memset(output, 0, sizeof(output)); - umfCtlGet("umf.pool.default.disjoint.name", output, value.size() / 2); - auto half_value = value.substr(0, value.size() / 2); - ASSERT_EQ(half_value, std::string(output)); - } catch (...) { - GTEST_FAIL() << "Unknown exception!"; + if (queryType == CTL_QUERY_READ) { + *static_cast(arg) = stored_value_; + return UMF_RESULT_SUCCESS; + } + + return UMF_RESULT_ERROR_INVALID_ARGUMENT; } - umfDisjointPoolParamsDestroy(params); - p.freeResources(); + + private: + static constexpr const char *kDefaultName = "mock"; + std::string name_storage_; + const char *name_ptr_; + int stored_value_; +}; + +TEST_F(CtlTest, ctlProviderDefaultsCustomName) { + umf_test::run_in_fork([] { + static auto provider_ops = + umf_test::providerMakeCOps(); + + int canonical_default = 21; + ASSERT_EQ(umfCtlSet("umf.provider.default.mock.params.value", + &canonical_default, sizeof(canonical_default)), + UMF_RESULT_SUCCESS); + + const std::string custom_name = "custom_provider"; + int custom_default = 37; + const std::string custom_path = + "umf.provider.default." 
+ custom_name + ".params.value"; + ASSERT_EQ(umfCtlSet(custom_path.c_str(), &custom_default, + sizeof(custom_default)), + UMF_RESULT_SUCCESS); + + ctl_provider_params custom_params{custom_name.c_str(), 0}; + umf_memory_provider_handle_t custom_handle = nullptr; + ASSERT_EQ(umfMemoryProviderCreate(&provider_ops, &custom_params, + &custom_handle), + UMF_RESULT_SUCCESS); + + int value = 0; + ASSERT_EQ(umfCtlGet("umf.provider.by_handle.{}.params.value", &value, + sizeof(value), custom_handle), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_default); + ASSERT_EQ(umfMemoryProviderDestroy(custom_handle), UMF_RESULT_SUCCESS); + + ctl_provider_params canonical_params{nullptr, 7}; + umf_memory_provider_handle_t canonical_handle = nullptr; + ASSERT_EQ(umfMemoryProviderCreate(&provider_ops, &canonical_params, + &canonical_handle), + UMF_RESULT_SUCCESS); + + ASSERT_EQ(umfCtlGet("umf.provider.by_handle.{}.params.value", &value, + sizeof(value), canonical_handle), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_default); + ASSERT_EQ(umfMemoryProviderDestroy(canonical_handle), + UMF_RESULT_SUCCESS); + }); } -TEST_F(CtlTest, DISABLED_ctlExecInvalidSize) { - std::string name = "umf.pool.default.disjoint.name"; - ASSERT_EQ(umfCtlSet(name.c_str(), (void *)"test_value", 0), - UMF_RESULT_ERROR_INVALID_ARGUMENT); - ASSERT_EQ(umfCtlSet(name.c_str(), NULL, 10), - UMF_RESULT_ERROR_INVALID_ARGUMENT); +/* Case: overwriting an existing value for pool defaults + * This test sets a default value and then overwrites it with a new value */ +TEST_F(CtlTest, ctlDefaultPoolOverwrite) { + umf_test::run_in_fork([] { + constexpr int max_size = 10; + std::vector values; + const std::string name = "umf.pool.default.some_pool"; + + for (int i = 0; i < max_size; i++) { + values.push_back("value_" + std::to_string(i)); + umf_result_t set_status = + umfCtlSet(name.c_str(), (void *)values.back().c_str(), + values.back().size()); + ASSERT_EQ(set_status, UMF_RESULT_SUCCESS); + } + + char output[100] = {0}; + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, values.back().c_str()); + }); } -#ifdef PROVIDER_DEFAULTS_NOT_IMPLEMENTED_YET TEST_F(CtlTest, ctlDefaultMultithreadedProvider) { - std::vector threads; - std::atomic totalRecords = 0; - const char *predefined_value = "xyzzyx"; - std::string name_prefix = "umf.provider.default.some_pool."; - for (int i = 0; i < 8; i++) { - threads.emplace_back( - [i, &totalRecords, &predefined_value, &name_prefix]() { + umf_test::run_in_fork([] { + std::vector threads; + std::atomic totalRecords = 0; + const char *predefined_value = "xyzzyx"; + std::string name_prefix = "umf.provider.default.some_provider."; + for (int i = 0; i < 8; i++) { + threads.emplace_back([i, &totalRecords, &predefined_value, + &name_prefix]() { for (int j = 0; j < 10; j++) { std::string name = name_prefix + std::to_string(i * 10 + j); umfCtlSet(name.c_str(), (void *)predefined_value, strlen(predefined_value)); - std::atomic_fetch_add(&totalRecords, 1); + std::atomic_fetch_add(&totalRecords, (size_t)1); } }); - } - for (auto &thread : threads) { - thread.join(); + } + for (auto &thread : threads) { + thread.join(); + } + + char output[100] = {0}; + for (size_t i = 0; i < totalRecords.load(); i++) { + std::string name = name_prefix + std::to_string(i); + umf_result_t status = + umfCtlGet(name.c_str(), (void *)output, sizeof(output)); + ASSERT_EQ(status, UMF_RESULT_SUCCESS); + ASSERT_STREQ(output, predefined_value); + } + }); +} + 
+TEST_F(test, ctl_logger_basic_rw) { + bool ts_set = true; + ASSERT_EQ(umfCtlSet("umf.logger.timestamp", &ts_set, sizeof(ts_set)), + UMF_RESULT_SUCCESS); + bool ts_get = false; + ASSERT_EQ(umfCtlGet("umf.logger.timestamp", &ts_get, sizeof(ts_get)), + UMF_RESULT_SUCCESS); + EXPECT_TRUE(ts_get); + + bool pid_set = 1; + ASSERT_EQ(umfCtlSet("umf.logger.pid", &pid_set, sizeof(pid_set)), + UMF_RESULT_SUCCESS); + bool pid_get = 0; + ASSERT_EQ(umfCtlGet("umf.logger.pid", &pid_get, sizeof(pid_get)), + UMF_RESULT_SUCCESS); + EXPECT_EQ(pid_get, 1); + + int level_set = 1; + ASSERT_EQ(umfCtlSet("umf.logger.level", &level_set, sizeof(level_set)), + UMF_RESULT_SUCCESS); + int level_get = 0; + ASSERT_EQ(umfCtlGet("umf.logger.level", &level_get, sizeof(level_get)), + UMF_RESULT_SUCCESS); + EXPECT_EQ(level_get, 1); + + int flush_set = 2; + ASSERT_EQ( + umfCtlSet("umf.logger.flush_level", &flush_set, sizeof(flush_set)), + UMF_RESULT_SUCCESS); + int flush_get = 0; + ASSERT_EQ( + umfCtlGet("umf.logger.flush_level", &flush_get, sizeof(flush_get)), + UMF_RESULT_SUCCESS); + EXPECT_EQ(flush_get, 2); + + const char out_name[] = "stdout"; + ASSERT_EQ( + umfCtlSet("umf.logger.output", (void *)out_name, sizeof(out_name)), + UMF_RESULT_SUCCESS); + const char out_get[256] = ""; + ASSERT_EQ(umfCtlGet("umf.logger.output", (void *)out_get, sizeof(out_get)), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(out_get, "stdout"); +} + +TEST_F(test, ctl_logger_output_file) { + const char file_name[] = "ctl_log.txt"; + ASSERT_EQ( + umfCtlSet("umf.logger.output", (void *)file_name, sizeof(file_name)), + UMF_RESULT_SUCCESS); + const char out_get[256] = ""; + ASSERT_EQ(umfCtlGet("umf.logger.output", (void *)out_get, sizeof(out_get)), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(out_get, file_name); +} + +TEST_F(test, ctl_by_name) { + umf_memory_provider_handle_t hProvider = NULL; + umf_os_memory_provider_params_handle_t os_memory_provider_params = NULL; + const umf_memory_provider_ops_t *os_provider_ops = umfOsMemoryProviderOps(); + if (os_provider_ops == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; } - char output[100] = {0}; - for (size_t i = 0; i < totalRecords.load(); i++) { - std::string name = name_prefix + std::to_string(i); - auto status = umfCtlGet(name.c_str(), (void *)output, sizeof(output)); - ASSERT_EQ(status, UMF_RESULT_SUCCESS); - ASSERT_EQ(std::string(output), std::string(predefined_value)); + int ret = umfOsMemoryProviderParamsCreate(&os_memory_provider_params); + ret = umfMemoryProviderCreate(os_provider_ops, os_memory_provider_params, + &hProvider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + umfOsMemoryProviderParamsDestroy(os_memory_provider_params); + + umf_disjoint_pool_params_handle_t disjoint_pool_params = NULL; + ret = umfDisjointPoolParamsCreate(&disjoint_pool_params); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *pool_name = "test_disjoint_pool"; + ret = umfDisjointPoolParamsSetName(disjoint_pool_params, pool_name); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_pool_handle_t hPool = NULL; + ret = umfPoolCreate(umfDisjointPoolOps(), hProvider, disjoint_pool_params, + 0, &hPool); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *pool_name2 = "test_disjoint_pool2"; + ret = umfDisjointPoolParamsSetName(disjoint_pool_params, pool_name2); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_pool_handle_t hPool2 = NULL; + ret = umfPoolCreate(umfDisjointPoolOps(), hProvider, disjoint_pool_params, + 0, &hPool2); + umfDisjointPoolParamsDestroy(disjoint_pool_params); + + size_t pool_count; + + 
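+    // umf.pool.by_name.<name>.count reports how many live pools were created
+    // with the given name (set via umfDisjointPoolParamsSetName above).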
ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool.count", &pool_count, + sizeof(pool_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(pool_count, 1ull); + + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool2.count", &pool_count, + sizeof(pool_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(pool_count, 1ull); + + size_t alloc_count; + ret = umfCtlGet("umf.pool.by_name.{}.stats.alloc_count", &alloc_count, + sizeof(alloc_count), pool_name); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + ret = umfCtlGet("umf.pool.by_name.{}.stats.alloc_count", &alloc_count, + sizeof(alloc_count), pool_name2); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + // allocate from pool1 + void *ptr1 = umfPoolMalloc(hPool, 1024); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_NE(ptr1, nullptr); + + // we can use pool name in the string without {} too + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 1ull); + + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool2.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + ret = umfPoolFree(hPool, ptr1); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + // we can use index parameter too + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool.0.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool2.{}.stats.alloc_count", + &alloc_count, sizeof(alloc_count), 0); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + // test too big pool index + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool2.10.stats.alloc_count", + &alloc_count, sizeof(alloc_count)); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ret = umfPoolDestroy(hPool); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ret = umfPoolDestroy(hPool2); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ret = umfMemoryProviderDestroy(hProvider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); +} + +TEST_F(test, ctl_by_name_collision) { + umf_memory_provider_handle_t hProvider = NULL; + umf_os_memory_provider_params_handle_t os_memory_provider_params = NULL; + const umf_memory_provider_ops_t *os_provider_ops = umfOsMemoryProviderOps(); + if (os_provider_ops == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; } + + int ret = umfOsMemoryProviderParamsCreate(&os_memory_provider_params); + ret = umfMemoryProviderCreate(os_provider_ops, os_memory_provider_params, + &hProvider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + umfOsMemoryProviderParamsDestroy(os_memory_provider_params); + + umf_disjoint_pool_params_handle_t disjoint_pool_params = NULL; + ret = umfDisjointPoolParamsCreate(&disjoint_pool_params); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *pool_name = "test_disjoint_pool"; + ret = umfDisjointPoolParamsSetName(disjoint_pool_params, pool_name); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_pool_handle_t hPool = NULL; + ret = umfPoolCreate(umfDisjointPoolOps(), hProvider, disjoint_pool_params, + 0, &hPool); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_pool_handle_t hPool2 = NULL; + ret = umfPoolCreate(umfDisjointPoolOps(), hProvider, disjoint_pool_params, + 0, &hPool2); + umfDisjointPoolParamsDestroy(disjoint_pool_params); + + // allocate from pool1 + void *ptr1 = 
umfPoolMalloc(hPool, 1024); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_NE(ptr1, nullptr); + + size_t pool_count; + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool.count", &pool_count, + sizeof(pool_count)); + + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(pool_count, 2ull); + + // If there is more than one pool with the same name, + // CtlGet by_name will return an error + size_t alloc_count; + ret = umfCtlGet("umf.pool.by_name.{}.stats.alloc_count", &alloc_count, + sizeof(alloc_count), pool_name); + + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // ctl set and exec will still work. But there is no CTL entry for now to test it + + // todo: add test when ctl entries will be extended + + // we can read from specific pool with index argument + ret = umfCtlGet("umf.pool.by_name.test_disjoint_pool.0.stats.alloc_count", + &alloc_count, sizeof(alloc_count), pool_name, 0); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 1ull); + + ret = umfCtlGet("umf.pool.by_name.{}.1.stats.alloc_count", &alloc_count, + sizeof(alloc_count), pool_name); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(alloc_count, 0ull); + + ret = umfPoolFree(hPool, ptr1); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ret = umfPoolDestroy(hPool); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ret = umfPoolDestroy(hPool2); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ret = umfMemoryProviderDestroy(hProvider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); } -#endif diff --git a/test/ctl/ctl_debug.c b/test/ctl/ctl_debug.c index e8730d8967..ddff10eea6 100644 --- a/test/ctl/ctl_debug.c +++ b/test/ctl/ctl_debug.c @@ -11,6 +11,7 @@ * ctl_debug.c -- implementation of the debug CTL namespace */ +#include #include #include "ctl/ctl_internal.h" @@ -183,4 +184,7 @@ static const umf_ctl_node_t CTL_NODE(debug)[] = { */ void debug_ctl_register(struct ctl *ctl) { CTL_REGISTER_MODULE(ctl, debug); } -void initialize_debug_ctl(void) { debug_ctl_register(&ctl_debug); } +void initialize_debug_ctl(void) { + debug_ctl_register(&ctl_debug); + ctl_init(malloc, free); +} diff --git a/test/ctl/ctl_env_app.cpp b/test/ctl/ctl_env_app.cpp new file mode 100644 index 0000000000..bb068e361d --- /dev/null +++ b/test/ctl/ctl_env_app.cpp @@ -0,0 +1,177 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +static int test_env_defaults(int argc, char **argv) { + char buf[64] = {0}; + + if (argc % 2 != 0) { + std::cerr << "expected even number of arguments" << std::endl; + std::cerr << "Usage: env_defaults key1 value1 key2 value2 ..." 
+ << std::endl; + return 1; + } + for (int i = 0; i < argc; i += 2) { + const char *key = argv[i]; + const char *value = argv[i + 1]; + if (umfCtlGet(key, buf, sizeof(buf)) != UMF_RESULT_SUCCESS) { + fprintf(stderr, "Failed to get control for '%s'\n", key); + return 1; + } + + if (strcmp(buf, value) != 0) { + std::cerr << "Expected value for '" << key << "' to be '" << value + << "', but got '" << buf << "'" << std::endl; + return 1; + } + } + return 0; +} + +static int test_logger(int argc, char **argv) { + char buf[256] = {0}; + int level = 0; + + if (argc != 2) { + std::cerr << "expected two arguments" << std::endl; + std::cerr << "Usage: logger log_output log_level" << std::endl; + return 1; + } + umfCtlGet("umf.logger.output", buf, sizeof(buf)); + if (strcmp(buf, argv[0]) != 0) { + std::cerr << "Expected log_output to be '" << argv[0] << "', but got '" + << buf << "'" << std::endl; + return 1; + } + + umfCtlGet("umf.logger.level", &level, sizeof(level)); + if (level != atoi(argv[1])) { + std::cerr << "Expected log_level to be '" << argv[1] << "', but got '" + << level << "'" << std::endl; + return 1; + } + + return 0; +} + +static int test_disjoint_pool(int argc, char **argv) { + if (argc % 2 != 0) { + std::cerr << "expected even number of arguments" << std::endl; + std::cerr << "Usage: disjoint_pool param value [param value]..." + << std::endl; + return 1; + } + + if (umfInit() != UMF_RESULT_SUCCESS) { + std::cerr << "umfInit failed" << std::endl; + return 1; + } + + int ret = 1; + umf_os_memory_provider_params_handle_t os_params = nullptr; + umf_memory_provider_handle_t provider = nullptr; + umf_memory_pool_handle_t pool = nullptr; + + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_params)) { + return 0; + } + + if (umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, + &provider) != UMF_RESULT_SUCCESS) { + std::cerr << "Failed to create provider" << std::endl; + goto out; + } + + if (umfPoolCreate(umfDisjointPoolOps(), provider, nullptr, 0, &pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to create disjoint pool" << std::endl; + goto out; + } + + for (int i = 0; i < argc; i += 2) { + const char *name = argv[i]; + const char *value = argv[i + 1]; + char path[128]; + snprintf(path, sizeof(path), "umf.pool.by_handle.{}.params.%s", name); + + if (strcmp(name, "pool_trace") == 0) { + int got = 0; + if (umfCtlGet(path, &got, sizeof(got), pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to get " << name << std::endl; + goto out; + } + if (got != atoi(value)) { + std::cerr << "Expected " << name << " to be " << value + << ", but got " << got << std::endl; + goto out; + } + } else { + size_t got = 0; + if (umfCtlGet(path, &got, sizeof(got), pool) != + UMF_RESULT_SUCCESS) { + std::cerr << "Failed to get " << name << std::endl; + goto out; + } + if (got != strtoull(value, nullptr, 10)) { + std::cerr << "Expected " << name << " to be " << value + << ", but got " << got << std::endl; + goto out; + } + } + } + + ret = 0; + +out: + if (pool) { + umfPoolDestroy(pool); + } + if (provider) { + umfMemoryProviderDestroy(provider); + } + if (os_params) { + umfOsMemoryProviderParamsDestroy(os_params); + } + + return ret; +} + +int main(int argc, char **argv) { + if (argc < 2) { + std::cerr << "Usage: " << argv[0] << " args..." 
+ << std::endl; + return 1; + } + const char *test_name = argv[1]; + argc -= 2; + argv += 2; + if (strcmp(test_name, "env_defaults") == 0) { + return test_env_defaults(argc, argv); + } + + if (strcmp(test_name, "logger") == 0) { + return test_logger(argc, argv); + } + + if (strcmp(test_name, "disjoint_pool") == 0) { + return test_disjoint_pool(argc, argv); + } + return 1; +} diff --git a/test/ctl/ctl_env_config1.cfg b/test/ctl/ctl_env_config1.cfg new file mode 100644 index 0000000000..9831dc352e --- /dev/null +++ b/test/ctl/ctl_env_config1.cfg @@ -0,0 +1,4 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +umf.pool.default.test_pool.opt_one=opt_one_value1; # test comment diff --git a/test/ctl/ctl_env_config2.cfg b/test/ctl/ctl_env_config2.cfg new file mode 100644 index 0000000000..ca2b52d625 --- /dev/null +++ b/test/ctl/ctl_env_config2.cfg @@ -0,0 +1,5 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +umf.pool.default.test_pool.opt_one=opt_one_value2; +umf.pool.default.test_pool.opt_two=opt_two_value2; diff --git a/test/ctl/ctl_env_disjoint_pool.cfg b/test/ctl/ctl_env_disjoint_pool.cfg new file mode 100644 index 0000000000..7fd2635cd9 --- /dev/null +++ b/test/ctl/ctl_env_disjoint_pool.cfg @@ -0,0 +1,7 @@ +# Copyright (C) 2025 Intel Corporation +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +umf.pool.default.disjoint.params.slab_min_size=65536; +umf.pool.default.disjoint.params.capacity=4; +umf.pool.default.disjoint.params.min_bucket_size=8; +umf.pool.default.disjoint.params.pool_trace=0; diff --git a/test/ctl/ctl_env_driver.cpp b/test/ctl/ctl_env_driver.cpp new file mode 100644 index 0000000000..63a805abe5 --- /dev/null +++ b/test/ctl/ctl_env_driver.cpp @@ -0,0 +1,145 @@ +/* + * + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + */ + +#include +#include +#ifdef _WIN32 +#include +#else +#include +#include +#endif + +#include +#include +#include + +#include "../common/base.hpp" +#include "gtest/gtest.h" + +using namespace umf_test; + +#ifndef CTL_ENV_APP +#define CTL_ENV_APP "./ctl_env_app" +#endif + +#ifndef CTL_CONF_FILE_DIR +#define CTL_CONF_FILE_DIR "./ctl" +#endif + +void set_env(std::pair env) { + const auto &name = env.first; + const auto &value = env.second; + + if (name.empty()) { + return; + } +#ifdef _WIN32 + _putenv_s(name.c_str(), value.c_str()); +#else + setenv(name.c_str(), value.c_str(), 1); +#endif +} + +static void run_case( + const std::vector> &env, + const std::vector &args) { + for (const auto &e : env) { + set_env(e); + } + +#ifdef _WIN32 + std::vector cargs; + cargs.push_back(CTL_ENV_APP); + for (const auto &s : args) { + cargs.push_back(s.c_str()); + } + + cargs.push_back(nullptr); + intptr_t status = _spawnv(_P_WAIT, CTL_ENV_APP, cargs.data()); + ASSERT_EQ(status, 0); +#else + pid_t pid = fork(); + if (pid == 0) { + std::vector cargs; + cargs.push_back(const_cast(CTL_ENV_APP)); + for (const auto &s : args) { + cargs.push_back(const_cast(s.c_str())); + } + cargs.push_back(nullptr); + execv(CTL_ENV_APP, cargs.data()); + std::cerr << "Failed to execute " << CTL_ENV_APP << std::endl; + _exit(127); + } + int status = 0; + waitpid(pid, &status, 0); + ASSERT_EQ(status, 0); +#endif + for (const auto &e : env) { + set_env({e.first, ""}); // Clear the environment variable + } +} + +TEST_F(test, ctl_env_defaults) { + run_case( + {{"UMF_CONF", "umf.pool.default.test_pool.opt_one=test_value"}}, + {"env_defaults", "umf.pool.default.test_pool.opt_one", "test_value"}); + + run_case({{"UMF_CONF", "umf.pool.default.test_pool.opt_one=second"}}, + {"env_defaults", "umf.pool.default.test_pool.opt_one", "second"}); +} + +TEST_F(test, ctl_env_file) { + std::string cfg1 = CTL_CONF_FILE_DIR "/ctl_env_config1.cfg"; + std::string cfg2 = CTL_CONF_FILE_DIR "/ctl_env_config2.cfg"; + + run_case({{"UMF_CONF_FILE", cfg1}}, + {"env_defaults", "umf.pool.default.test_pool.opt_one", + "opt_one_value1"}); + + run_case({{"UMF_CONF_FILE", cfg2}}, + {"env_defaults", "umf.pool.default.test_pool.opt_one", + "opt_one_value2", "umf.pool.default.test_pool.opt_two", + "opt_two_value2"}); +} + +TEST_F(test, ctl_env_plus_file) { + std::string cfg = CTL_CONF_FILE_DIR "/ctl_env_config2.cfg"; + + // it is expected that configuration from file will override configuration from environment variable + run_case({{"UMF_CONF_FILE", cfg}, + {"UMF_CONF", "umf.pool.default.test_pool.opt_one=first;umf.pool." 
+ "default.test_pool.opt_three=second"}}, + {"env_defaults", "umf.pool.default.test_pool.opt_one", + "opt_one_value2", "umf.pool.default.test_pool.opt_two", + "opt_two_value2", "umf.pool.default.test_pool.opt_three", + "second"}); +} + +TEST_F(test, ctl_env_logger) { + run_case({{"UMF_CONF", "umf.logger.output=stdout;umf.logger.level=0"}}, + {"logger", "stdout", "0"}); +} + +TEST_F(test, ctl_env_disjoint_pool_env) { + run_case( + {{"UMF_CONF", "umf.pool.default.disjoint.params.slab_min_size=65536;" + "umf.pool.default.disjoint.params.capacity=4;" + "umf.pool.default.disjoint.params.min_bucket_size=8;" + "umf.pool.default.disjoint.params.pool_trace=0"}}, + {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", + "min_bucket_size", "8", "pool_trace", "0"}); +} + +TEST_F(test, ctl_env_disjoint_pool_file) { + std::string cfg = CTL_CONF_FILE_DIR "/ctl_env_disjoint_pool.cfg"; + run_case({{"UMF_CONF_FILE", cfg}}, + {"disjoint_pool", "slab_min_size", "65536", "capacity", "4", + "min_bucket_size", "8", "pool_trace", "0"}); +} diff --git a/test/ctl/ctl_unittest.cpp b/test/ctl/ctl_unittest.cpp index d4b0004eea..7ae2a12148 100644 --- a/test/ctl/ctl_unittest.cpp +++ b/test/ctl/ctl_unittest.cpp @@ -179,18 +179,18 @@ TEST_F(test, ctl_debug_node_arg_invalid) { "debug.arg_test.42", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); ret = ctl_query(ctl_handler, NULL, CTL_QUERY_PROGRAMMATIC, "debug.arg_test.arg_value", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); ret = ctl_query(ctl_handler, NULL, CTL_QUERY_PROGRAMMATIC, "debug.arg_test.wrong_type.arg_value", CTL_QUERY_READ, &arg, sizeof(arg), empty_args); - ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); va_end(empty_args); } diff --git a/test/disjoint_pool_file_prov.cpp b/test/disjoint_pool_file_prov.cpp index 607d265e04..817a0c1088 100644 --- a/test/disjoint_pool_file_prov.cpp +++ b/test/disjoint_pool_file_prov.cpp @@ -37,7 +37,13 @@ INSTANTIATE_TEST_SUITE_P( FileWithMemoryStrategyTest, FileWithMemoryStrategyTest, ::testing::Values(UMF_COARSE_MEMORY_STRATEGY_FASTEST, UMF_COARSE_MEMORY_STRATEGY_FASTEST_BUT_ONE, - UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE)); + UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE), + ([](auto const &info) -> std::string { + const char *names[] = {"UMF_COARSE_MEMORY_STRATEGY_FASTEST", + "UMF_COARSE_MEMORY_STRATEGY_FASTEST_BUT_ONE", + "UMF_COARSE_MEMORY_STRATEGY_CHECK_ALL_SIZE"}; + return names[info.index]; + })); TEST_P(FileWithMemoryStrategyTest, disjointFileMallocPool_simple1) { umf_memory_provider_handle_t malloc_memory_provider = nullptr; diff --git a/test/ipcAPI.cpp b/test/ipcAPI.cpp index bd3f412da4..c9496f863d 100644 --- a/test/ipcAPI.cpp +++ b/test/ipcAPI.cpp @@ -128,4 +128,5 @@ INSTANTIATE_TEST_SUITE_P(umfIpcTestSuite, umfIpcTest, ::testing::Values(ipcTestParams{ umfProxyPoolOps(), nullptr, nullptr, &IPC_MOCK_PROVIDER_OPS, nullptr, nullptr, - &hostMemoryAccessor})); + &hostMemoryAccessor}), + ipcTestParamsNameGen); diff --git a/test/ipcFixtures.hpp b/test/ipcFixtures.hpp index 4c1e5e7149..104ca3c26d 100644 --- a/test/ipcFixtures.hpp +++ b/test/ipcFixtures.hpp @@ -5,21 +5,21 @@ #ifndef UMF_TEST_IPC_FIXTURES_HPP #define UMF_TEST_IPC_FIXTURES_HPP -#include "base.hpp" -#include "multithread_helpers.hpp" -#include "pool.hpp" -#include "test_helpers.h" +#include 
+#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include +#include "base.hpp" +#include "multithread_helpers.hpp" +#include "pool.hpp" +#include "test_helpers.h" class MemoryAccessor { public: @@ -27,6 +27,7 @@ class MemoryAccessor { virtual void fill(void *ptr, size_t size, const void *pattern, size_t pattern_size) = 0; virtual void copy(void *dst_ptr, void *src_ptr, size_t size) = 0; + virtual const char *getName() = 0; }; class HostMemoryAccessor : public MemoryAccessor { @@ -47,6 +48,8 @@ class HostMemoryAccessor : public MemoryAccessor { void copy(void *dst_ptr, void *src_ptr, size_t size) override { std::memcpy(dst_ptr, src_ptr, size); } + + const char *getName() override { return "HostMemoryAccessor"; } }; typedef void *(*pfnPoolParamsCreate)(); @@ -65,6 +68,29 @@ using ipcTestParams = pfnProviderParamsCreate, pfnProviderParamsDestroy, MemoryAccessor *>; +std::string +ipcTestParamsNameGen(const ::testing::TestParamInfo &info) { + const umf_memory_pool_ops_t *pool_ops = std::get<0>(info.param); + const umf_memory_provider_ops_t *provider_ops = std::get<3>(info.param); + + const char *poolName = NULL; + pool_ops->get_name(NULL, &poolName); + + const char *providerName = NULL; + provider_ops->get_name(NULL, &providerName); + + // if there are multiple cases with the same pool and provider combination, + // add index to the name + std::string poolParams = std::get<1>(info.param) + ? "_w_params_" + std::to_string(info.index) + : ""; + + MemoryAccessor *memAccessor = std::get<6>(info.param); + + return std::string(poolName) + poolParams + "_" + providerName + "_" + + memAccessor->getName(); +} + struct umfIpcTest : umf_test::test, ::testing::WithParamInterface { umfIpcTest() {} @@ -679,6 +705,29 @@ TEST_P(umfIpcTest, openInTwoIpcHandlers) { EXPECT_EQ(stat.closeCount, stat.openCount); } +TEST_P(umfIpcTest, PutIPCHandleAfterFree) { + constexpr size_t SIZE = 100; + umf_test::pool_unique_handle_t pool = makePool(); + ASSERT_NE(pool.get(), nullptr); + + void *ptr = umfPoolMalloc(pool.get(), SIZE); + EXPECT_NE(ptr, nullptr); + + umf_ipc_handle_t ipcHandle = nullptr; + size_t handleSize = 0; + umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + ret = umfPoolFree(pool.get(), ptr); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + + ret = umfPutIPCHandle(ipcHandle); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + + pool.reset(nullptr); + EXPECT_EQ(stat.putCount, stat.getCount); +} + TEST_P(umfIpcTest, ConcurrentGetConcurrentPutHandles) { concurrentGetConcurrentPutHandles(false); } diff --git a/test/ipc_devdax_prov_consumer.c b/test/ipc_devdax_prov_consumer.c index 760d075c83..105ddd864b 100644 --- a/test/ipc_devdax_prov_consumer.c +++ b/test/ipc_devdax_prov_consumer.c @@ -23,13 +23,13 @@ int main(int argc, char *argv[]) { int port = atoi(argv[1]); char *path = getenv("UMF_TESTS_DEVDAX_PATH"); - if (path == NULL || path[0] == 0) { + if (path == NULL || path[0] == '\0') { fprintf(stderr, "Test skipped, UMF_TESTS_DEVDAX_PATH is not set\n"); return 0; } char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (size == NULL || size[0] == 0) { + if (size == NULL || size[0] == '\0') { fprintf(stderr, "Test skipped, UMF_TESTS_DEVDAX_SIZE is not set\n"); return 0; } diff --git a/test/ipc_devdax_prov_producer.c b/test/ipc_devdax_prov_producer.c index 39d5985990..2445db07ec 100644 --- a/test/ipc_devdax_prov_producer.c +++ b/test/ipc_devdax_prov_producer.c @@ -23,13 +23,13 @@ int main(int argc, char *argv[]) { 
int port = atoi(argv[1]); char *path = getenv("UMF_TESTS_DEVDAX_PATH"); - if (path == NULL || path[0] == 0) { + if (path == NULL || path[0] == '\0') { fprintf(stderr, "Test skipped, UMF_TESTS_DEVDAX_PATH is not set\n"); return 0; } char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (size == NULL || size[0] == 0) { + if (size == NULL || size[0] == '\0') { fprintf(stderr, "Test skipped, UMF_TESTS_DEVDAX_SIZE is not set\n"); return 0; } diff --git a/test/memoryPoolAPI.cpp b/test/memoryPoolAPI.cpp index 16d7afd58e..f2cfb61bb7 100644 --- a/test/memoryPoolAPI.cpp +++ b/test/memoryPoolAPI.cpp @@ -3,28 +3,35 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // This file contains tests for UMF pool API -#include "base.hpp" -#include "pool.hpp" -#include "poolFixtures.hpp" -#include "provider.hpp" -#include "provider_null.h" -#include "provider_trace.h" -#include "test_helpers.h" +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include + +#ifdef UMF_POOL_JEMALLOC_ENABLED +#include +#include +#endif #ifdef UMF_PROXY_LIB_ENABLED #include #endif -#include -#include -#include -#include -#include -#include +#include "base.hpp" +#include "pool.hpp" +#include "poolFixtures.hpp" +#include "provider.hpp" +#include "provider_null.h" +#include "provider_trace.h" +#include "test_helpers.h" using umf_test::test; using namespace umf_test; @@ -79,7 +86,8 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolTrace) { size_t tmpSize; umfPoolMallocUsableSize(tracingPool.get(), nullptr, &tmpSize); - // we ignore return value of poolMallocUsabeSize(), as it might be not supported + // we ignore return value of umfPoolMallocUsableSize(), as it might be not + // supported ASSERT_EQ(poolCalls["malloc_usable_size"], 1UL); ASSERT_EQ(poolCalls.size(), ++pool_call_count); @@ -112,6 +120,12 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolTrace) { ASSERT_EQ(poolCalls["get_last_native_error"], 1UL); ASSERT_EQ(poolCalls.size(), ++pool_call_count); + umfPoolTrimMemory(tracingPool.get(), 0); + // we ignore return value of umfPoolTrimMemory(), as it might be not + // supported + ASSERT_EQ(poolCalls["trim_memory"], 1UL); + ASSERT_EQ(poolCalls.size(), ++pool_call_count); + if (manuallyDestroyProvider) { umfMemoryProviderDestroy(provider); } @@ -126,6 +140,13 @@ TEST_P(umfPoolWithCreateFlagsTest, memoryPoolWithCustomProvider) { EXPECT_NE_NOEXCEPT(provider, nullptr); return UMF_RESULT_SUCCESS; } + + umf_result_t get_name(const char **name) noexcept { + if (name) { + *name = "pool"; + } + return UMF_RESULT_SUCCESS; + } }; umf_memory_pool_ops_t pool_ops = umf_test::poolMakeCOps(); @@ -300,6 +321,31 @@ TEST_F(tagTest, SetAndGetInvalidPool) { ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); } +#ifdef UMF_POOL_JEMALLOC_ENABLED +static void *createOsMemoryProviderParams() { + umf_os_memory_provider_params_handle_t params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(¶ms); + if (res != UMF_RESULT_SUCCESS) { + throw std::runtime_error("Failed to create os memory provider params"); + } + + return params; +} + +static umf_result_t destroyOsMemoryProviderParams(void *params) { + return umfOsMemoryProviderParamsDestroy( + (umf_os_memory_provider_params_handle_t)params); +} + +INSTANTIATE_TEST_SUITE_P( + jemallocPoolTest, umfPoolTest, + ::testing::Values(poolCreateExtParams{ + umfJemallocPoolOps(), nullptr, nullptr, umfOsMemoryProviderOps(), + createOsMemoryProviderParams, destroyOsMemoryProviderParams}), + poolCreateExtParamsNameGen); + +#endif /* UMF_POOL_JEMALLOC_ENABLED 
*/ + INSTANTIATE_TEST_SUITE_P( mallocPoolTest, umfPoolTest, ::testing::Values( @@ -309,16 +355,31 @@ INSTANTIATE_TEST_SUITE_P( &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}, poolCreateExtParams{umfDisjointPoolOps(), defaultDisjointPoolConfig, defaultDisjointPoolConfigDestroy, - &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr})); + &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); + +#ifdef UMF_POOL_SCALABLE_ENABLED +INSTANTIATE_TEST_SUITE_P(mallocPoolTestScalable, umfPoolTest, + ::testing::Values(poolCreateExtParams{ + umfScalablePoolOps(), nullptr, nullptr, + &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); +#endif INSTANTIATE_TEST_SUITE_P(mallocMultiPoolTest, umfMultiPoolTest, ::testing::Values(poolCreateExtParams{ umfProxyPoolOps(), nullptr, nullptr, - &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr})); + &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); INSTANTIATE_TEST_SUITE_P(umfPoolWithCreateFlagsTest, umfPoolWithCreateFlagsTest, ::testing::Values(0, - UMF_POOL_CREATE_FLAG_OWN_PROVIDER)); + UMF_POOL_CREATE_FLAG_OWN_PROVIDER), + ([](auto const &info) -> std::string { + static const char *names[] = { + "NONE", "UMF_POOL_CREATE_FLAG_OWN_PROVIDER"}; + return names[info.index]; + })); ////////////////// Negative test cases ///////////////// @@ -382,7 +443,14 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY, UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC, UMF_RESULT_ERROR_INVALID_ARGUMENT, - UMF_RESULT_ERROR_UNKNOWN)); + UMF_RESULT_ERROR_UNKNOWN), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY", + "UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC", + "UMF_RESULT_ERROR_INVALID_ARGUMENT", "UMF_RESULT_ERROR_UNKNOWN"}; + return names[info.index]; + })); TEST_P(poolInitializeTest, errorPropagation) { auto nullProvider = umf_test::wrapProviderUnique(nullProviderCreate()); @@ -559,4 +627,19 @@ INSTANTIATE_TEST_SUITE_P( umf_test::withGeneratedArgs(umfPoolGetMemoryProvider), umf_test::withGeneratedArgs(umfPoolByPtr), umf_test::withGeneratedArgs(umfPoolSetTag), - umf_test::withGeneratedArgs(umfPoolGetTag))); + umf_test::withGeneratedArgs(umfPoolGetTag)), + ([](auto const &info) -> std::string { + static const char *names[] = {"umfPoolMalloc", + "umfPoolAlignedMalloc", + "umfPoolFree", + "umfPoolCalloc", + "umfPoolRealloc", + "umfPoolMallocUsableSize", + "umfPoolGetLastAllocationError", + "umfPoolGetName", + "umfPoolGetMemoryProvider", + "umfPoolByPtr", + "umfPoolSetTag", + "umfPoolGetTag"}; + return names[info.index]; + })); diff --git a/test/memoryProviderAPI.cpp b/test/memoryProviderAPI.cpp index 33e298dc6e..5cbc8c2cac 100644 --- a/test/memoryProviderAPI.cpp +++ b/test/memoryProviderAPI.cpp @@ -3,15 +3,16 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // This file contains tests for UMF provider API -#include "provider.hpp" -#include "provider_null.h" -#include "test_helpers.h" - #include #include #include #include +#include "memory_properties_internal.h" +#include "provider.hpp" +#include "provider_null.h" +#include "test_helpers.h" + using umf_test::test; TEST_F(test, memoryProviderTrace) { @@ -26,7 +27,7 @@ TEST_F(test, memoryProviderTrace) { auto tracingProvider = umf_test::wrapProviderUnique( traceProviderCreate(nullProvider, true, &calls, trace)); - size_t call_count = 0; + size_t call_count = 1; // get_name is called during initialization void *ptr; auto ret = umfMemoryProviderAlloc(tracingProvider.get(), 0, 0, &ptr); @@ 
-64,8 +65,8 @@ TEST_F(test, memoryProviderTrace) { ret = umfMemoryProviderGetName(tracingProvider.get(), &pName); ASSERT_EQ(ret, UMF_RESULT_SUCCESS); ASSERT_NE(pName, nullptr); - ASSERT_EQ(calls["name"], 1UL); - ASSERT_EQ(calls.size(), ++call_count); + ASSERT_EQ(calls["name"], 2UL); + ASSERT_EQ(calls.size(), call_count); ASSERT_EQ(std::string(pName), std::string("null")); ret = umfMemoryProviderPurgeLazy(tracingProvider.get(), &page_size, @@ -338,7 +339,14 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY, UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC, UMF_RESULT_ERROR_INVALID_ARGUMENT, - UMF_RESULT_ERROR_UNKNOWN)); + UMF_RESULT_ERROR_UNKNOWN), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY", + "UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC", + "UMF_RESULT_ERROR_INVALID_ARGUMENT", "UMF_RESULT_ERROR_UNKNOWN"}; + return names[info.index]; + })); TEST_P(providerInitializeTest, errorPropagation) { struct provider : public umf_test::provider_base_t { @@ -389,4 +397,14 @@ INSTANTIATE_TEST_SUITE_P( umf_test::withGeneratedArgs(umfMemoryProviderGetMinPageSize), umf_test::withGeneratedArgs(umfMemoryProviderPurgeLazy), umf_test::withGeneratedArgs(umfMemoryProviderPurgeForce), - umf_test::withGeneratedArgs(umfMemoryProviderGetName))); + umf_test::withGeneratedArgs(umfMemoryProviderGetName)), + ([](auto const &info) -> std::string { + static const char *names[] = {"umfMemoryProviderAlloc", + "umfMemoryProviderFree", + "umfMemoryProviderGetRecommendedPageSize", + "umfMemoryProviderGetMinPageSize", + "umfMemoryProviderPurgeLazy", + "umfMemoryProviderPurgeForce", + "umfMemoryProviderGetName"}; + return names[info.index]; + })); diff --git a/test/memspaces/memspace_highest_bandwidth.cpp b/test/memspaces/memspace_highest_bandwidth.cpp index 5bedac0eae..5ad19baedb 100644 --- a/test/memspaces/memspace_highest_bandwidth.cpp +++ b/test/memspaces/memspace_highest_bandwidth.cpp @@ -40,16 +40,25 @@ static void canQueryBandwidth(size_t nodeId) { } } -INSTANTIATE_TEST_SUITE_P(memspaceLowestLatencyTest, memspaceGetTest, - ::testing::Values(memspaceGetParams{ - canQueryBandwidth, - umfMemspaceHighestBandwidthGet})); - -INSTANTIATE_TEST_SUITE_P(memspaceLowestLatencyProviderTest, - memspaceProviderTest, - ::testing::Values(memspaceGetParams{ - canQueryBandwidth, - umfMemspaceHighestBandwidthGet})); +INSTANTIATE_TEST_SUITE_P( + memspaceLowestLatencyTest, memspaceGetTest, + ::testing::Values(memspaceGetParams{canQueryBandwidth, + umfMemspaceHighestBandwidthGet}), + ([](auto const &info) -> std::string { + static const char *names[] = {"canQueryBandwidth", + "umfMemspaceHighestBandwidthGet"}; + return names[info.index]; + })); + +INSTANTIATE_TEST_SUITE_P( + memspaceLowestLatencyProviderTest, memspaceProviderTest, + ::testing::Values(memspaceGetParams{canQueryBandwidth, + umfMemspaceHighestBandwidthGet}), + ([](auto const &info) -> std::string { + static const char *names[] = {"canQueryBandwidth", + "umfMemspaceHighestBandwidthGet"}; + return names[info.index]; + })); TEST_F(numaNodesTest, PerCoreBandwidthPlacement) { const size_t allocSize = 4096; diff --git a/test/memspaces/memspace_lowest_latency.cpp b/test/memspaces/memspace_lowest_latency.cpp index 02fdd481ef..78e8412de1 100644 --- a/test/memspaces/memspace_lowest_latency.cpp +++ b/test/memspaces/memspace_lowest_latency.cpp @@ -41,9 +41,21 @@ static void canQueryLatency(size_t nodeId) { INSTANTIATE_TEST_SUITE_P(memspaceLowestLatencyTest, memspaceGetTest, 
::testing::Values(memspaceGetParams{ - canQueryLatency, umfMemspaceLowestLatencyGet})); + canQueryLatency, umfMemspaceLowestLatencyGet}), + ([](auto const &info) -> std::string { + static const char *names[] = { + "canQueryLatency", + "umfMemspaceLowestLatencyGet"}; + return names[info.index]; + })); INSTANTIATE_TEST_SUITE_P(memspaceLowestLatencyProviderTest, memspaceProviderTest, ::testing::Values(memspaceGetParams{ - canQueryLatency, umfMemspaceLowestLatencyGet})); + canQueryLatency, umfMemspaceLowestLatencyGet}), + ([](auto const &info) -> std::string { + static const char *names[] = { + "canQueryLatency", + "umfMemspaceLowestLatencyGet"}; + return names[info.index]; + })); diff --git a/test/poolFixtures.hpp b/test/poolFixtures.hpp index 98778cd562..2b42750239 100644 --- a/test/poolFixtures.hpp +++ b/test/poolFixtures.hpp @@ -23,17 +23,6 @@ #include "provider.hpp" #include "utils/utils_sanitizers.h" -typedef void *(*pfnPoolParamsCreate)(); -typedef umf_result_t (*pfnPoolParamsDestroy)(void *); - -typedef void *(*pfnProviderParamsCreate)(); -typedef umf_result_t (*pfnProviderParamsDestroy)(void *); - -using poolCreateExtParams = - std::tuple; - umf_test::pool_unique_handle_t poolCreateExtUnique(poolCreateExtParams params) { auto [pool_ops, poolParamsCreate, poolParamsDestroy, provider_ops, providerParamsCreate, providerParamsDestroy] = params; @@ -51,6 +40,10 @@ umf_test::pool_unique_handle_t poolCreateExtUnique(poolCreateExtParams params) { &upstream_provider); EXPECT_EQ(ret, UMF_RESULT_SUCCESS); EXPECT_NE(upstream_provider, nullptr); + if (ret != UMF_RESULT_SUCCESS || upstream_provider == nullptr) { + assert(false && "Failed to create a memory provider"); + return umf_test::pool_unique_handle_t(nullptr, nullptr); + } provider = upstream_provider; @@ -65,6 +58,10 @@ umf_test::pool_unique_handle_t poolCreateExtUnique(poolCreateExtParams params) { UMF_POOL_CREATE_FLAG_OWN_PROVIDER, &hPool); EXPECT_EQ(ret, UMF_RESULT_SUCCESS); EXPECT_NE(hPool, nullptr); + if (ret != UMF_RESULT_SUCCESS || hPool == nullptr) { + assert(false && "Failed to create a memory pool"); + return umf_test::pool_unique_handle_t(nullptr, nullptr); + } // we do not need params anymore if (poolParamsDestroy) { @@ -133,6 +130,11 @@ GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfMemTest); GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfPoolTest); GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfMultiPoolTest); +TEST_P(umfPoolTest, destroyNullptr) { + auto ret = umfPoolDestroy(nullptr); + ASSERT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); +} + TEST_P(umfPoolTest, allocFree) { static constexpr size_t allocSize = 64; auto *ptr = umfPoolMalloc(pool.get(), allocSize); @@ -403,6 +405,32 @@ TEST_P(umfPoolTest, multiThreadedMallocFreeRandomSizes) { } } +TEST_P(umfPoolTest, trimMemory) { + constexpr size_t size = 1024; + + umf_memory_pool_handle_t hPool = pool.get(); + void *ptr = umfPoolMalloc(hPool, size); + ASSERT_NE(ptr, nullptr); + + umf_result_t ret = umfPoolFree(hPool, ptr); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + size_t reserved_memory1 = 0; + ret = umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory1, sizeof(size_t), hPool); + ASSERT_GE(reserved_memory1, 0ull); + + // if supported, call to umfPoolTrimMemory should purge the whole memory + // pool + ret = umfPoolTrimMemory(hPool, 0); + if (ret == UMF_RESULT_SUCCESS) { + size_t reserved_memory2 = 0; + ret = umfCtlGet("umf.pool.by_handle.{}.stats.reserved_memory", + &reserved_memory2, sizeof(size_t), hPool); + ASSERT_EQ(reserved_memory2, 0ull); + } 
+} + TEST_P(umfMemTest, outOfMem) { static constexpr size_t allocSize = 4096; auto hPool = pool.get(); diff --git a/test/pools/disjoint_pool.cpp b/test/pools/disjoint_pool.cpp index 92ccd0410a..065b457de0 100644 --- a/test/pools/disjoint_pool.cpp +++ b/test/pools/disjoint_pool.cpp @@ -4,6 +4,7 @@ #include +#include #include #include #include @@ -14,10 +15,13 @@ #include "provider.hpp" #include "provider_null.h" #include "provider_trace.h" +#include "umf/memory_provider.h" using umf_test::test; using namespace umf_test; +static void get_test_va_list(va_list *a, ...) { va_start(*a, a); } + TEST_F(test, internals) { static umf_result_t expectedResult = UMF_RESULT_SUCCESS; struct memory_provider : public umf_test::provider_base_t { @@ -64,6 +68,12 @@ TEST_F(test, internals) { disjoint_pool_t *pool; umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); + va_list empty_args; + get_test_va_list(&empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, "post_initialize", + nullptr, 0, CTL_QUERY_RUNNABLE, empty_args); + va_end(empty_args); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); EXPECT_NE(pool, nullptr); EXPECT_EQ(pool->provider_min_page_size, (size_t)1024); @@ -274,6 +284,97 @@ TEST_F(test, sharedLimits) { EXPECT_EQ(MaxSize / SlabMinSize * 2, numFrees); } +TEST_F(test, disjointPoolTrim) { + struct memory_provider : public umf_test::provider_base_t { + umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept { + *ptr = umf_ba_global_aligned_alloc(size, alignment); + return UMF_RESULT_SUCCESS; + } + + umf_result_t free(void *ptr, size_t) noexcept { + umf_ba_global_free(ptr); + return UMF_RESULT_SUCCESS; + } + }; + + umf_memory_provider_ops_t provider_ops = + umf_test::providerMakeCOps(); + + auto providerUnique = + wrapProviderUnique(createProviderChecked(&provider_ops, nullptr)); + + umf_memory_provider_handle_t provider_handle; + provider_handle = providerUnique.get(); + + umf_disjoint_pool_params_handle_t params = + (umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig(); + params->pool_trace = 3; + // Set the slab min size to 64 so allocating 64 bytes will use the whole + // slab. + params->slab_min_size = 64; + params->capacity = 4; + + // in "internals" test we use ops interface to directly manipulate the pool + // structure + const umf_memory_pool_ops_t *ops = umfDisjointPoolOps(); + EXPECT_NE(ops, nullptr); + + disjoint_pool_t *pool; + umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_NE(pool, nullptr); + + va_list empty_args; + get_test_va_list(&empty_args); + res = ops->ext_ctl((void *)pool, CTL_QUERY_PROGRAMMATIC, "post_initialize", + nullptr, 0, CTL_QUERY_RUNNABLE, empty_args); + va_end(empty_args); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + // do 4 allocs, then free all of them + size_t size = 64; + void *ptrs[4] = {0}; + ptrs[0] = ops->malloc(pool, size); + EXPECT_NE(ptrs[0], nullptr); + ptrs[1] = ops->malloc(pool, size); + EXPECT_NE(ptrs[1], nullptr); + ptrs[2] = ops->malloc(pool, size); + EXPECT_NE(ptrs[2], nullptr); + ptrs[3] = ops->malloc(pool, size); + EXPECT_NE(ptrs[3], nullptr); + + ops->free(pool, ptrs[0]); + ops->free(pool, ptrs[1]); + ops->free(pool, ptrs[2]); + ops->free(pool, ptrs[3]); + + // Because we set the slab min size to 64, each allocation should go to the + // separate slab. Additionally, because we set the capacity to 4, all slabs + // should still be in the pool available for new allocations. 
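+    // Verify the bucket's slab counters reflect that state before trimming.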
+ EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)4); + EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0); + EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, (size_t)4); + + // Trim memory - leave 3 slabs in the pool + ops->ext_trim_memory(pool, 3 * pool->buckets[0]->size); + EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)3); + EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0); + + // Trim memory again - leave 1 slab in the pool + ops->ext_trim_memory(pool, pool->buckets[0]->size); + EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)1); + EXPECT_EQ(pool->buckets[0]->curr_slabs_in_use, (size_t)0); + + // Trim the rest of memory + ops->ext_trim_memory(pool, 0); + EXPECT_EQ(pool->buckets[0]->available_slabs_num, (size_t)0); + EXPECT_EQ(pool->buckets[0]->curr_slabs_in_pool, (size_t)0); + + ops->finalize(pool); + res = umfDisjointPoolParamsDestroy(params); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); +} + TEST_F(test, disjointPoolNullParams) { umf_result_t res = umfDisjointPoolParamsCreate(nullptr); EXPECT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT); @@ -336,31 +437,56 @@ TEST_F(test, disjointPoolName) { umf_disjoint_pool_params_handle_t params = nullptr; umf_result_t res = umfDisjointPoolParamsCreate(¶ms); EXPECT_EQ(res, UMF_RESULT_SUCCESS); - umf_memory_provider_handle_t provider_handle = nullptr; umf_memory_pool_handle_t pool = NULL; - struct memory_provider : public umf_test::provider_base_t {}; + auto nullProvider = nullProviderCreate(); - umf_memory_provider_ops_t provider_ops = - umf_test::providerMakeCOps(); + res = umfPoolCreate(umfDisjointPoolOps(), nullProvider, params, 0, &pool); - auto providerUnique = - wrapProviderUnique(createProviderChecked(&provider_ops, nullptr)); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; + res = umfPoolGetName(pool, &name); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "disjoint"); - provider_handle = providerUnique.get(); + umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); + umfDisjointPoolParamsDestroy(params); +} - res = - umfPoolCreate(umfDisjointPoolOps(), provider_handle, params, 0, &pool); +TEST_F(test, disjointPoolCustomName) { + umf_disjoint_pool_params_handle_t params = nullptr; + umf_result_t res = umfDisjointPoolParamsCreate(¶ms); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfDisjointPoolParamsSetName(params, "my_disjoint"); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + struct memory_provider : public umf_test::provider_base_t {}; + + auto nullProvider = nullProviderCreate(); + umf_memory_pool_handle_t pool = NULL; + + res = umfPoolCreate(umfDisjointPoolOps(), nullProvider, params, 0, &pool); EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; res = umfPoolGetName(pool, &name); EXPECT_EQ(res, UMF_RESULT_SUCCESS); - EXPECT_STREQ(name, "disjoint"); + EXPECT_STREQ(name, "my_disjoint"); umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); umfDisjointPoolParamsDestroy(params); } +TEST(DisjointPoolOps, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfDisjointPoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "disjoint"); +} + TEST_F(test, disjointPoolDefaultParams) { // Disjoint pool defaults static constexpr size_t DefaultSlabMinSize = 64 * 1024; // 64K @@ -499,7 +625,8 @@ INSTANTIATE_TEST_SUITE_P(disjointPoolTests, umfPoolTest, ::testing::Values(poolCreateExtParams{ umfDisjointPoolOps(), defaultDisjointPoolConfig, defaultDisjointPoolConfigDestroy, - &BA_GLOBAL_PROVIDER_OPS, nullptr, 
nullptr})); + &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); void *memProviderParams() { return (void *)&DEFAULT_DISJOINT_CAPACITY; } @@ -510,10 +637,15 @@ INSTANTIATE_TEST_SUITE_P( defaultDisjointPoolConfigDestroy, &MOCK_OUT_OF_MEM_PROVIDER_OPS, memProviderParams, nullptr}, - static_cast(DEFAULT_DISJOINT_CAPACITY) / 2))); + static_cast(DEFAULT_DISJOINT_CAPACITY) / 2)), + ([](auto) { + return std::string("disjoint_out_of_mem_capacity_") + + std::to_string(static_cast(DEFAULT_DISJOINT_CAPACITY) / 2); + })); INSTANTIATE_TEST_SUITE_P(disjointMultiPoolTests, umfMultiPoolTest, ::testing::Values(poolCreateExtParams{ umfDisjointPoolOps(), defaultDisjointPoolConfig, defaultDisjointPoolConfigDestroy, - &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr})); + &BA_GLOBAL_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); diff --git a/test/pools/disjoint_pool_ctl.cpp b/test/pools/disjoint_pool_ctl.cpp index 5de142d323..31bc975e3d 100644 --- a/test/pools/disjoint_pool_ctl.cpp +++ b/test/pools/disjoint_pool_ctl.cpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exceptiongi #include +#include #include #include #include @@ -10,9 +11,13 @@ #include #include +#include +#include #include #include "base.hpp" +#include "common/fork_helpers.hpp" +#include "ctl/ctl_internal.h" #include "utils_assert.h" #include "utils_log.h" @@ -89,73 +94,6 @@ class ProviderWrapper { void *m_params; }; -TEST_F(test, DISABLED_disjointCtlName) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - - // Set default name - const char *val = "disjoint_new_name"; - ASSERT_SUCCESS( - umfCtlSet("umf.pool.default.disjoint.name", (void *)val, strlen(val))); - - umf_disjoint_pool_params_handle_t params = nullptr; - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - - // Check that the default name is correctly set - const char *name = NULL; - ASSERT_SUCCESS(umfPoolGetName(poolWrapper.get(), &name)); - ASSERT_STREQ(name, val); - - // Clean up - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); -} - -TEST_F(test, DISABLED_disjointCtlChangeNameTwice) { - umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; - if (UMF_RESULT_ERROR_NOT_SUPPORTED == - umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), - os_memory_provider_params); - if (providerWrapper.get() == NULL) { - GTEST_SKIP() << "OS memory provider is not supported!"; - } - // Set default name - const char *val = "disjoint_new_name"; - const char *val2 = "another_name"; - ASSERT_SUCCESS( - umfCtlSet("umf.pool.default.disjoint.name", (void *)val, strlen(val))); - ASSERT_SUCCESS(umfCtlSet("umf.pool.default.disjoint.name", (void *)val2, - strlen(val2))); - - umf_disjoint_pool_params_handle_t params = nullptr; - ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); - PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), - params); - - 
// Check that the default name is correctly set - const char *name = NULL; - ASSERT_SUCCESS(umfPoolGetName(poolWrapper.get(), &name)); - ASSERT_STREQ(name, val2); - - // Clean up - ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); - ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); -} - TEST_F(test, disjointCtlUsedMemory) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == @@ -302,6 +240,227 @@ TEST_F(test, disjointCtlReservedMemory) { ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } +TEST_F(test, disjointCtlGetParams) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + const size_t slab_min_size = 32 * 1024; + const size_t max_poolable_size = 512 * 1024; + const size_t capacity = 7; + const size_t min_bucket_size = 16; + const int pool_trace = 1; + + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS( + umfDisjointPoolParamsSetMaxPoolableSize(params, max_poolable_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, capacity)); + ASSERT_SUCCESS( + umfDisjointPoolParamsSetMinBucketSize(params, min_bucket_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetTrace(params, pool_trace)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t got_size = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", + &got_size, sizeof(got_size), poolWrapper.get())); + EXPECT_EQ(got_size, slab_min_size); + + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.max_poolable_size", + &got_size, sizeof(got_size), poolWrapper.get())); + EXPECT_EQ(got_size, max_poolable_size); + + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &got_size, + sizeof(got_size), poolWrapper.get())); + EXPECT_EQ(got_size, capacity); + + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &got_size, sizeof(got_size), poolWrapper.get())); + EXPECT_EQ(got_size, min_bucket_size); + + int got_trace = 0; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", + &got_trace, sizeof(got_trace), poolWrapper.get())); + EXPECT_EQ(got_trace, pool_trace); + + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlDefaultsOverride) { + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + size_t default_capacity = 4; + size_t 
default_min_bucket = 8; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), + UMF_RESULT_SUCCESS); + + size_t override_capacity = 2; + size_t override_min_bucket = 32; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &override_capacity, sizeof(override_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &override_min_bucket, sizeof(override_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t raw_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_params), UMF_RESULT_SUCCESS); + std::unique_ptr + params(raw_params, &umfDisjointPoolParamsDestroy); + + ASSERT_EQ(umfDisjointPoolParamsSetCapacity(params.get(), 1), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfDisjointPoolParamsSetMinBucketSize(params.get(), 64), + UMF_RESULT_SUCCESS); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params.get()); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, override_capacity); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, override_min_bucket); + + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &default_capacity, sizeof(default_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &default_min_bucket, sizeof(default_min_bucket)), + UMF_RESULT_SUCCESS); + }); +} + +TEST_F(test, disjointCtlDefaultsCustomName) { + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + const std::string custom_name = "custom_disjoint_pool"; + + size_t canonical_capacity = 9; + size_t canonical_min_bucket = 32; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &canonical_capacity, sizeof(canonical_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &canonical_min_bucket, + sizeof(canonical_min_bucket)), + UMF_RESULT_SUCCESS); + + size_t custom_capacity = 11; + size_t custom_min_bucket = 64; + const std::string custom_capacity_path = + "umf.pool.default." + custom_name + ".params.capacity"; + const std::string custom_min_bucket_path = + "umf.pool.default." 
+ custom_name + ".params.min_bucket_size"; + ASSERT_EQ(umfCtlSet(custom_capacity_path.c_str(), &custom_capacity, + sizeof(custom_capacity)), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfCtlSet(custom_min_bucket_path.c_str(), &custom_min_bucket, + sizeof(custom_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t raw_custom_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_custom_params), + UMF_RESULT_SUCCESS); + std::unique_ptr + custom_params(raw_custom_params, &umfDisjointPoolParamsDestroy); + + ASSERT_EQ(umfDisjointPoolParamsSetName(custom_params.get(), + custom_name.c_str()), + UMF_RESULT_SUCCESS); + + PoolWrapper customPool(providerWrapper.get(), umfDisjointPoolOps(), + custom_params.get()); + ASSERT_NE(customPool.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), customPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_capacity); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), customPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, custom_min_bucket); + + umf_disjoint_pool_params_handle_t raw_default_params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(&raw_default_params), + UMF_RESULT_SUCCESS); + std::unique_ptr + default_params(raw_default_params, &umfDisjointPoolParamsDestroy); + + PoolWrapper defaultPool(providerWrapper.get(), umfDisjointPoolOps(), + default_params.get()); + ASSERT_NE(defaultPool.get(), nullptr); + + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), defaultPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_capacity); + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), defaultPool.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, canonical_min_bucket); + }); +} + TEST_F(test, disjointCtlMemoryMetricsConsistency) { umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; if (UMF_RESULT_ERROR_NOT_SUPPORTED == @@ -421,3 +580,420 @@ TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) { ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); } + +TEST_F(test, disjointCtlBucketStats) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + // Set minimum slab size + size_t slab_min_size = 64 * 1024; + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetTrace(params, 3)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t arg = 0; + size_t count = 0; + const size_t alloc_size = 128; + size_t used_bucket = SIZE_MAX; + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.buckets.count", &count, + sizeof(count), poolWrapper.get())); + EXPECT_GE(count, 0ull); + + auto expected_bucket_size = [](size_t i) -> size_t { + // Even indexes: 8 << (i/2) => 8,16,32,64,... 
+ // Odd indexes: 12 << (i/2) => 12,24,48,96,... + return (i % 2 == 0) ? (size_t(8) << (i / 2)) : (size_t(12) << (i / 2)); + }; + + for (size_t i = 0; i < count; i++) { + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.buckets.{}.size", &arg, + sizeof(arg), poolWrapper.get(), i)); + EXPECT_EQ(arg, expected_bucket_size(i)) << "Failed for bucket: " << i; + if (arg >= alloc_size && used_bucket == SIZE_MAX) { + used_bucket = i; // Find the bucket that matches alloc_size + } + } + + std::unordered_map stats = { + {"alloc_num", 0ull}, {"alloc_pool_num", 0ull}, + {"free_num", 0ull}, {"curr_slabs_in_use", 0ull}, + {"curr_slabs_in_pool", 0ull}, {"max_slabs_in_use", 0ull}, + {"max_slabs_in_pool", 0ull}, + }; + + for (const auto &s : stats) { + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), + s.first.c_str())); + EXPECT_EQ(arg, s.second) << "Failed for stat: " << s.first; + } + + for (size_t i = 0; i < count; i++) { + for (const auto &s : stats) { + ASSERT_SUCCESS( + umfCtlGet("umf.pool.by_handle.{}.buckets.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), i, s.first.c_str())); + EXPECT_EQ(arg, i == used_bucket ? s.second : 0) + << "Failed for stat: " << s.first << " bucket: " << i; + } + } + + const size_t n_allocations = 10; // Number of allocations + + // Allocate memory + std::vector ptrs; + for (size_t i = 0; i < n_allocations; i++) { + void *ptr = umfPoolMalloc(poolWrapper.get(), alloc_size); + ASSERT_NE(ptr, nullptr); + ptrs.push_back(ptr); + } + + stats = { + {"alloc_num", 10ull}, {"alloc_pool_num", 9ull}, + {"free_num", 0ull}, {"curr_slabs_in_use", 1ull}, + {"curr_slabs_in_pool", 0ull}, {"max_slabs_in_use", 1ull}, + {"max_slabs_in_pool", 0ull}, + }; + + for (const auto &s : stats) { + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), + s.first.c_str())); + EXPECT_EQ(arg, s.second) << "Failed for stat: " << s.first; + } + for (size_t i = 0; i < count; i++) { + for (const auto &s : stats) { + ASSERT_SUCCESS( + umfCtlGet("umf.pool.by_handle.{}.buckets.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), i, s.first.c_str())); + EXPECT_EQ(arg, i == used_bucket ? s.second : 0) + << "Failed for stat: " << s.first << " bucket: " << i; + } + } + + // Free all memory + for (void *ptr : ptrs) { + ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr)); + } + + stats = { + {"alloc_num", 10ull}, {"alloc_pool_num", 9ull}, + {"free_num", 10ull}, {"curr_slabs_in_use", 0ull}, + {"curr_slabs_in_pool", 1ull}, {"max_slabs_in_use", 1ull}, + {"max_slabs_in_pool", 1ull}, + }; + + for (const auto &s : stats) { + ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), + s.first.c_str())); + EXPECT_EQ(arg, s.second) << "Failed for stat: " << s.first; + } + + for (size_t i = 0; i < count; i++) { + for (const auto &s : stats) { + ASSERT_SUCCESS( + umfCtlGet("umf.pool.by_handle.{}.buckets.{}.stats.{}", &arg, + sizeof(arg), poolWrapper.get(), i, s.first.c_str())); + EXPECT_EQ(arg, i == used_bucket ? 
s.second : 0) + << "Failed for stat: " << s.first << " bucket: " << i; + } + } + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlBucketStatsTraceDisabled) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + // Set minimum slab size + size_t slab_min_size = 64 * 1024; + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4)); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t arg = 0; + // trace disabled + umf_result_t ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_num", &arg, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED); + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlBucketStatsInvalid) { + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + if (UMF_RESULT_ERROR_NOT_SUPPORTED == + umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_memory_provider_params); + if (providerWrapper.get() == NULL) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_SUCCESS(umfDisjointPoolParamsCreate(¶ms)); + + // Set minimum slab size + size_t slab_min_size = 64 * 1024; + ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4)); + ASSERT_SUCCESS(umfDisjointPoolParamsSetTrace(params, 3)); + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + + size_t arg = 0; + + // invalid bucket index + umf_result_t ret = + umfCtlGet("umf.pool.by_handle.{}.buckets.1000000.stats.alloc_num", &arg, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + // invalid arg + ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_num", NULL, sizeof(arg), + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ret = umfCtlGet("umf.pool.by_handle.{}.stats.alloc_num", &arg, 1, + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.count", NULL, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.count", &arg, 1, + poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); + + ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.1.alloc_num", NULL, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + ret = umfCtlGet("umf.pool.by_handle.{}.stats.1.alloc_num", &arg, 1, + poolWrapper.get()); + 
EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); + + // no bucket id + ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.alloc_num", &arg, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); + + // bucked id + count + ret = umfCtlGet("umf.pool.by_handle.{}.stats.buckets.1.count", &arg, + sizeof(arg), poolWrapper.get()); + EXPECT_EQ(ret, UMF_RESULT_ERROR_INVALID_CTL_PATH); + + // Clean up + ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params)); + ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params)); +} + +TEST_F(test, disjointCtlParams) { + umf_test::run_in_fork([] { + umf_os_memory_provider_params_handle_t raw_os_params = nullptr; + umf_result_t res = umfOsMemoryProviderParamsCreate(&raw_os_params); + if (res == UMF_RESULT_ERROR_NOT_SUPPORTED) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + std::unique_ptr + os_params(raw_os_params, &umfOsMemoryProviderParamsDestroy); + + ProviderWrapper providerWrapper(umfOsMemoryProviderOps(), + os_params.get()); + if (providerWrapper.get() == nullptr) { + GTEST_SKIP() << "OS memory provider is not supported!"; + } + + // slab_min_size + { + size_t new_slab_min_size = 128 * 1024; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.slab_min_size", + &new_slab_min_size, sizeof(new_slab_min_size)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.slab_min_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_slab_min_size); + + size_t other = new_slab_min_size + 1024; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.slab_min_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // max_poolable_size + { + size_t new_max_poolable = 1 * MB; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.max_poolable_size", + &new_max_poolable, sizeof(new_max_poolable)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ( + umfCtlGet("umf.pool.by_handle.{}.params.max_poolable_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_max_poolable); + + size_t other = new_max_poolable * 2; + EXPECT_EQ( + umfCtlSet("umf.pool.by_handle.{}.params.max_poolable_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // capacity + { + size_t new_capacity = 8; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.capacity", + &new_capacity, sizeof(new_capacity)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); 
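+            // The pool should report the capacity value set via the CTL default above.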
+ + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.capacity", &value, + sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_capacity); + + size_t other = 16; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.capacity", &other, + sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // min_bucket_size + { + size_t new_min_bucket = 16; + ASSERT_EQ( + umfCtlSet("umf.pool.default.disjoint.params.min_bucket_size", + &new_min_bucket, sizeof(new_min_bucket)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + size_t value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.min_bucket_size", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_min_bucket); + + size_t other = 32; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.min_bucket_size", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + + // pool_trace + { + int new_trace = 3; + ASSERT_EQ(umfCtlSet("umf.pool.default.disjoint.params.pool_trace", + &new_trace, sizeof(new_trace)), + UMF_RESULT_SUCCESS); + + umf_disjoint_pool_params_handle_t params = nullptr; + ASSERT_EQ(umfDisjointPoolParamsCreate(¶ms), UMF_RESULT_SUCCESS); + std::unique_ptr + params_guard(params, &umfDisjointPoolParamsDestroy); + + PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(), + params); + ASSERT_NE(poolWrapper.get(), nullptr); + + int value = 0; + ASSERT_EQ(umfCtlGet("umf.pool.by_handle.{}.params.pool_trace", + &value, sizeof(value), poolWrapper.get()), + UMF_RESULT_SUCCESS); + EXPECT_EQ(value, new_trace); + + int other = 1; + EXPECT_EQ(umfCtlSet("umf.pool.by_handle.{}.params.pool_trace", + &other, sizeof(other), poolWrapper.get()), + UMF_RESULT_ERROR_NOT_SUPPORTED); + } + }); +} diff --git a/test/pools/jemalloc_coarse_devdax.cpp b/test/pools/jemalloc_coarse_devdax.cpp index 703f1176b1..8af7809970 100644 --- a/test/pools/jemalloc_coarse_devdax.cpp +++ b/test/pools/jemalloc_coarse_devdax.cpp @@ -10,7 +10,8 @@ bool devDaxEnvSet() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (path == nullptr || path[0] == 0 || size == nullptr || size[0] == 0) { + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { return false; } @@ -20,6 +21,10 @@ bool devDaxEnvSet() { void *createDevDaxParams() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { + return nullptr; + } umf_devdax_memory_provider_params_handle_t params = NULL; umf_result_t res = @@ -41,4 +46,5 @@ static std::vector poolParamsList = : std::vector{}; INSTANTIATE_TEST_SUITE_P(jemallocCoarseDevDaxTest, umfPoolTest, - ::testing::ValuesIn(poolParamsList)); + ::testing::ValuesIn(poolParamsList), + poolCreateExtParamsNameGen); diff --git a/test/pools/jemalloc_coarse_file.cpp b/test/pools/jemalloc_coarse_file.cpp index bce595a2b7..b6f84804fe 100644 --- a/test/pools/jemalloc_coarse_file.cpp +++ b/test/pools/jemalloc_coarse_file.cpp @@ -28,4 +28,5 @@ INSTANTIATE_TEST_SUITE_P(jemallocCoarseFileTest, umfPoolTest, ::testing::Values(poolCreateExtParams{ 
umfJemallocPoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(), getFileParamsDefault, - destroyFileParams})); + destroyFileParams}), + poolCreateExtParamsNameGen); diff --git a/test/pools/jemalloc_pool.cpp b/test/pools/jemalloc_pool.cpp index 906aba7637..c87b049c1a 100644 --- a/test/pools/jemalloc_pool.cpp +++ b/test/pools/jemalloc_pool.cpp @@ -87,7 +87,8 @@ INSTANTIATE_TEST_SUITE_P( poolCreateExtParams{umfJemallocPoolOps(), createJemallocParams<1>, destroyJemallocParams, umfOsMemoryProviderOps(), createOsMemoryProviderParams, - destroyOsMemoryProviderParams})); + destroyOsMemoryProviderParams}), + poolCreateExtParamsNameGen); // this test makes sure that jemalloc does not use // memory provider to allocate metadata (and hence @@ -225,3 +226,33 @@ TEST_F(test, jemallocProviderDoesNotSupportSplit) { umfMemoryProviderDestroy(ba_provider); umfJemallocPoolParamsDestroy(params); } + +TEST_F(test, jemallocPoolCustomName) { + umf_jemalloc_pool_params_handle_t params = nullptr; + umf_result_t res = umfJemallocPoolParamsCreate(¶ms); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfJemallocPoolParamsSetName(params, "my_jemalloc"); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + + auto nullProvider = nullProviderCreate(); + + umf_memory_pool_handle_t pool = NULL; + res = umfPoolCreate(umfJemallocPoolOps(), nullProvider, params, 0, &pool); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + const char *name = nullptr; + res = umfPoolGetName(pool, &name); + EXPECT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "my_jemalloc"); + + umfPoolDestroy(pool); + umfMemoryProviderDestroy(nullProvider); + umfJemallocPoolParamsDestroy(params); +} + +TEST(JemallocPoolOps, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfJemallocPoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "jemalloc"); +} diff --git a/test/pools/pool_base_alloc.cpp b/test/pools/pool_base_alloc.cpp index 4be438936f..583b417cf6 100644 --- a/test/pools/pool_base_alloc.cpp +++ b/test/pools/pool_base_alloc.cpp @@ -7,12 +7,11 @@ #include #include +#include "base_alloc_global.h" #include "pool.hpp" #include "poolFixtures.hpp" #include "provider.hpp" -#include "base_alloc_global.h" - struct base_alloc_pool : public umf_test::pool_base_t { void *malloc(size_t size) noexcept { return umf_ba_global_alloc(size); } @@ -50,6 +49,9 @@ struct base_alloc_pool : public umf_test::pool_base_t { } return UMF_RESULT_SUCCESS; } + umf_result_t ext_trim_memory(size_t) noexcept { + return UMF_RESULT_ERROR_NOT_SUPPORTED; + } }; umf_memory_pool_ops_t BA_POOL_OPS = @@ -58,4 +60,5 @@ umf_memory_pool_ops_t BA_POOL_OPS = INSTANTIATE_TEST_SUITE_P(baPool, umfPoolTest, ::testing::Values(poolCreateExtParams{ &BA_POOL_OPS, nullptr, nullptr, - &umf_test::BASE_PROVIDER_OPS, nullptr, nullptr})); + &umf_test::BASE_PROVIDER_OPS, nullptr, nullptr}), + poolCreateExtParamsNameGen); diff --git a/test/pools/pool_residency.cpp b/test/pools/pool_residency.cpp new file mode 100644 index 0000000000..26ed99e37d --- /dev/null +++ b/test/pools/pool_residency.cpp @@ -0,0 +1,195 @@ +// Copyright (C) 2025 Intel Corporation +// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include +#include + +#include "../common/level_zero_mocks.h" +#include "pool.hpp" +#include "gtest/gtest.h" + +// On MSVC disable C6285 warning produced by gtest: +// ( || ) is always a non-zero constant. +// Did you intend to use the bitwise-and operator? 
+#ifdef _MSC_VER +#pragma warning(disable : 6285) +#endif + +using namespace testing; + +class PoolResidencyTestFixture : public Test { + protected: + umf_memory_pool_handle_t pool = nullptr; + const ze_device_handle_t OUR_DEVICE; + StrictMock l0mock; + + PoolResidencyTestFixture() + : OUR_DEVICE(TestCreatePointer(777)) { + *MockedLevelZeroTestEnvironment::l0interface = &l0mock; + } + + void initializeMemoryPool(umf_memory_provider_handle_t provider) { + + auto *params = static_cast( + umf_test::defaultDisjointPoolConfig()); + + EXPECT_EQ(umfPoolCreate(umfDisjointPoolOps(), provider, params, + UMF_POOL_CREATE_FLAG_OWN_PROVIDER, &pool), + UMF_RESULT_SUCCESS); + + umf_test::defaultDisjointPoolConfigDestroy(params); + } + + void SetUp() override {} + void TearDown() override { + if (pool != nullptr) { + EXPECT_CALL(l0mock, zeMemFree(CONTEXT, _)) + .WillRepeatedly(Return(ZE_RESULT_SUCCESS)); + umfPoolDestroy(pool); + } + Mock::VerifyAndClearExpectations(&l0mock); + } +}; + +TEST_F(PoolResidencyTestFixture, + initialResidentDevicesShouldBeUsedDuringAllocation) { + initializeMemoryPool(l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_0, DEVICE_1})); + + EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_0, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_1, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + void *ptr = umfPoolMalloc(pool, 123); + EXPECT_EQ(ptr, POINTER_0); + + umfPoolFree(pool, ptr); +} + +TEST_F(PoolResidencyTestFixture, + addedResidentDevicesShouldBeUsedDuringAllocation) { + initializeMemoryPool(l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_0})); + + umf_memory_provider_handle_t provider = nullptr; + EXPECT_EQ(umfPoolGetMemoryProvider(pool, &provider), UMF_RESULT_SUCCESS); + umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_4, true); + + EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_0, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_4, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + void *ptr = umfPoolMalloc(pool, 123); + EXPECT_EQ(ptr, POINTER_0); + + umfPoolFree(pool, ptr); +} + +TEST_F(PoolResidencyTestFixture, + existingAllocationsShouldBeMadeResidentOnAddedDevice) { + initializeMemoryPool( + l0mock.initializeMemoryProviderWithResidentDevices(OUR_DEVICE, {})); + + EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + + void *ptr = umfPoolMalloc(pool, 123); + EXPECT_EQ(ptr, POINTER_0); + + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_4, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + umf_memory_provider_handle_t provider = nullptr; + EXPECT_EQ(umfPoolGetMemoryProvider(pool, &provider), UMF_RESULT_SUCCESS); + umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_4, true); + + umfPoolFree(pool, ptr); +} + +TEST_F(PoolResidencyTestFixture, + existingAllocationsShouldBeEvictedFromRemovedDevice) { + initializeMemoryPool(l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_2, DEVICE_3})); + + 
EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_2, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_3, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + void *ptr = umfPoolMalloc(pool, 123); + EXPECT_EQ(ptr, POINTER_0); + + EXPECT_CALL(l0mock, zeContextEvictMemory(CONTEXT, DEVICE_2, _, _)) + .WillOnce(Return(ZE_RESULT_SUCCESS)); + + umf_memory_provider_handle_t provider = nullptr; + EXPECT_EQ(umfPoolGetMemoryProvider(pool, &provider), UMF_RESULT_SUCCESS); + umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_2, false); + + umfPoolFree(pool, ptr); +} + +TEST_F(PoolResidencyTestFixture, + allocationShouldNotBeMadeResidentOnRemovedDevice) { + initializeMemoryPool(l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_2})); + + umf_memory_provider_handle_t provider = nullptr; + EXPECT_EQ(umfPoolGetMemoryProvider(pool, &provider), UMF_RESULT_SUCCESS); + umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_2, false); + + EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_2, _, _)) + .Times(0); // not called + + void *ptr = umfPoolMalloc(pool, 123); + EXPECT_EQ(ptr, POINTER_0); + + umfPoolFree(pool, ptr); +} + +TEST_F(PoolResidencyTestFixture, + allocationThatFailedToBeMadeResidedShouldBeFreed) { + initializeMemoryPool(l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_2})); + + EXPECT_CALL(l0mock, zeMemAllocDevice(CONTEXT, _, _, _, OUR_DEVICE, _)) + .WillOnce( + DoAll(SetArgPointee<5>(POINTER_0), Return(ZE_RESULT_SUCCESS))); + EXPECT_CALL(l0mock, zeContextMakeMemoryResident(CONTEXT, DEVICE_2, _, _)) + .WillOnce(Return(ZE_RESULT_ERROR_DEVICE_LOST)); + EXPECT_CALL(l0mock, zeMemFree(CONTEXT, _)) + .WillOnce(Return(ZE_RESULT_ERROR_DEVICE_IN_LOW_POWER_STATE)); + + void *ptr = umfPoolMalloc(pool, 16 * 1024 * 1024); + EXPECT_EQ(ptr, nullptr); + + umfPoolFree(pool, ptr); +} + +int main(int argc, char **argv) { + try { + InitGoogleTest(&argc, argv); + AddGlobalTestEnvironment(new MockedLevelZeroTestEnvironment); + return RUN_ALL_TESTS(); + } catch (...) { + std::cerr << "Exception occurred." 
<< std::endl; + return 1; + } +} diff --git a/test/pools/scalable_coarse_devdax.cpp b/test/pools/scalable_coarse_devdax.cpp index 8dc8d576ba..83fa855279 100644 --- a/test/pools/scalable_coarse_devdax.cpp +++ b/test/pools/scalable_coarse_devdax.cpp @@ -10,7 +10,8 @@ bool devDaxEnvSet() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (path == nullptr || path[0] == 0 || size == nullptr || size[0] == 0) { + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { return false; } @@ -20,6 +21,10 @@ bool devDaxEnvSet() { void *createDevDaxParams() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { + return nullptr; + } umf_devdax_memory_provider_params_handle_t params = NULL; umf_result_t res = @@ -45,4 +50,5 @@ static std::vector poolParamsList = : std::vector{}; INSTANTIATE_TEST_SUITE_P(scalableCoarseDevDaxTest, umfPoolTest, - ::testing::ValuesIn(poolParamsList)); + ::testing::ValuesIn(poolParamsList), + poolCreateExtParamsNameGen); diff --git a/test/pools/scalable_coarse_file.cpp b/test/pools/scalable_coarse_file.cpp index b9865b7811..778a47202c 100644 --- a/test/pools/scalable_coarse_file.cpp +++ b/test/pools/scalable_coarse_file.cpp @@ -28,4 +28,5 @@ INSTANTIATE_TEST_SUITE_P(scalableCoarseFileTest, umfPoolTest, ::testing::Values(poolCreateExtParams{ umfScalablePoolOps(), nullptr, nullptr, umfFileMemoryProviderOps(), getFileParamsDefault, - destroyFileParams})); + destroyFileParams}), + poolCreateExtParamsNameGen); diff --git a/test/pools/scalable_pool.cpp b/test/pools/scalable_pool.cpp index 0c68302014..d34e16826c 100644 --- a/test/pools/scalable_pool.cpp +++ b/test/pools/scalable_pool.cpp @@ -28,7 +28,8 @@ INSTANTIATE_TEST_SUITE_P( scalablePoolTest, umfPoolTest, ::testing::Values(poolCreateExtParams{ umfScalablePoolOps(), nullptr, nullptr, umfOsMemoryProviderOps(), - createOsMemoryProviderParams, destroyOsMemoryProviderParams})); + createOsMemoryProviderParams, destroyOsMemoryProviderParams}), + poolCreateExtParamsNameGen); using scalablePoolParams = std::tuple; struct umfScalablePoolParamsTest @@ -165,7 +166,12 @@ INSTANTIATE_TEST_SUITE_P( scalablePoolTest, umfScalablePoolParamsTest, testing::Combine(testing::Values(2 * 1024 * 1024, 3 * 1024 * 1024, 4 * 1024 * 1024, 5 * 1024 * 1024), - testing::Values(false, true))); + testing::Values(false, true)), + ([](auto const &info) -> std::string { + return "scalable_granularity_" + + std::to_string(std::get<0>(info.param)) + "_keep_all_memory" + + (std::get<1>(info.param) ? 
"_true" : "_false"); + })); TEST(scalablePoolTest, scalablePoolName) { umf_memory_pool_handle_t pool = nullptr; @@ -189,3 +195,40 @@ TEST(scalablePoolTest, scalablePoolName) { umfMemoryProviderDestroy(provider); umfOsMemoryProviderParamsDestroy(provider_params); } + +TEST(scalablePoolTest, scalablePoolCustomName) { + umf_memory_pool_handle_t pool = nullptr; + umf_os_memory_provider_params_handle_t provider_params = nullptr; + umf_memory_provider_handle_t provider = nullptr; + + auto ret = umfOsMemoryProviderParamsCreate(&provider_params); + ret = umfMemoryProviderCreate(umfOsMemoryProviderOps(), provider_params, + &provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_scalable_pool_params_handle_t params = nullptr; + ret = umfScalablePoolParamsCreate(¶ms); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(umfScalablePoolParamsSetName(params, "custom_scalable"), + UMF_RESULT_SUCCESS); + + ret = umfPoolCreate(umfScalablePoolOps(), provider, params, 0, &pool); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfPoolGetName(pool, &name); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "custom_scalable"); + + umfPoolDestroy(pool); + umfScalablePoolParamsDestroy(params); + umfMemoryProviderDestroy(provider); + umfOsMemoryProviderParamsDestroy(provider_params); +} + +TEST(scalablePoolTest, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfScalablePoolOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "scalable"); +} diff --git a/test/properties/provider_properties.cpp b/test/properties/provider_properties.cpp new file mode 100644 index 0000000000..73542b528a --- /dev/null +++ b/test/properties/provider_properties.cpp @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +*/ + +#include + +#include "memory_properties_internal.h" +#include "provider.hpp" +#include "provider_properties.hpp" + +void createFixedProvider(umf_memory_provider_handle_t *out_provider, + void *out_data) { + constexpr size_t buffer_size = 1024 * 1024; + + void *memory_buffer = malloc(buffer_size); + ASSERT_NE(memory_buffer, nullptr); + + umf_fixed_memory_provider_params_handle_t params = nullptr; + umf_result_t res = + umfFixedMemoryProviderParamsCreate(memory_buffer, buffer_size, ¶ms); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(params, nullptr); + + res = umfMemoryProviderCreate(umfFixedMemoryProviderOps(), params, + out_provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(out_provider, nullptr); + + umfFixedMemoryProviderParamsDestroy(params); + + *(uintptr_t *)out_data = (uintptr_t)memory_buffer; +} + +void destroyFixedProvider(umf_memory_provider_handle_t provider, void *data) { + umfMemoryProviderDestroy(provider); + free(data); +} + +void createOsMemoryProvider(umf_memory_provider_handle_t *out_provider, + void *out_data) { + + umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; + umf_result_t res = + umfOsMemoryProviderParamsCreate(&os_memory_provider_params); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(os_memory_provider_params, nullptr); + + umf_memory_provider_handle_t os_memory_provider = nullptr; + res = + umfMemoryProviderCreate(umfOsMemoryProviderOps(), + os_memory_provider_params, &os_memory_provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(os_memory_provider, nullptr); + + res = umfOsMemoryProviderParamsDestroy(os_memory_provider_params); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + *out_provider = os_memory_provider; + *(uintptr_t *)out_data = (uintptr_t)NULL; +} + +void destroyOsMemoryProvider(umf_memory_provider_handle_t provider, + void *data) { + (void)data; // unused + + umfMemoryProviderDestroy(provider); +} + +INSTANTIATE_TEST_SUITE_P( + providerPropsTest, ProviderPropsTest, + ::testing::Values(testParams{createFixedProvider, destroyFixedProvider, + "fixedProvider"}, + testParams{createOsMemoryProvider, + destroyOsMemoryProvider, "osMemoryProvider"}), + nameGen); + +TEST_F(test, CustomPropsTest) { + const uint64_t custom_property_id = UMF_MEMORY_PROPERTY_MAX_RESERVED + 1; + + struct memory_provider : public umf_test::provider_base_t { + umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept { + *ptr = umf_ba_global_aligned_alloc(size, alignment); + return UMF_RESULT_SUCCESS; + } + + umf_result_t free(void *ptr, [[maybe_unused]] size_t size) noexcept { + umf_ba_global_free(ptr); + return UMF_RESULT_SUCCESS; + } + + umf_result_t + get_min_page_size([[maybe_unused]] const void *ptr, + [[maybe_unused]] size_t *pageSize) noexcept { + *pageSize = 1024; + return UMF_RESULT_SUCCESS; + } + + umf_result_t ext_get_allocation_properties( + const void *ptr, umf_memory_property_id_t memory_property_id, + void *value) { + + (void)ptr; // unused + + if (memory_property_id == custom_property_id) { + *(uint64_t *)value = 42; // Custom value for the property + return UMF_RESULT_SUCCESS; + } + + return umf_test::provider_base_t::ext_get_allocation_properties( + ptr, memory_property_id, value); + } + + umf_result_t ext_get_allocation_properties_size( + umf_memory_property_id_t memory_property_id, size_t *size) { + if (memory_property_id == custom_property_id) { + *size = sizeof(uint64_t); + return UMF_RESULT_SUCCESS; + } + return 
umf_test::provider_base_t:: + ext_get_allocation_properties_size(memory_property_id, size); + } + }; + + umf_memory_provider_ops_t provider_ops = + umf_test::providerMakeCOps(); + + umf_memory_provider_handle_t provider = nullptr; + umf_result_t res = + umfMemoryProviderCreate(&provider_ops, nullptr, &provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + umf_memory_pool_handle_t pool = nullptr; + res = umfPoolCreate(umfProxyPoolOps(), provider, nullptr, + UMF_POOL_CREATE_FLAG_NONE, &pool); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(pool, nullptr); + + void *ptr = umfPoolMalloc(pool, 1024); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t properties = nullptr; + res = umfGetMemoryPropertiesHandle(ptr, &properties); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(properties, nullptr); + + // get value of the custom property from the properties handle + uint64_t value2 = 0; + res = umfGetMemoryProperty(properties, + (umf_memory_property_id_t)custom_property_id, + &value2, sizeof(value2)); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_EQ(value2, 42); + + res = umfPoolFree(pool, ptr); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfPoolDestroy(pool); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + + res = umfMemoryProviderDestroy(provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); +} diff --git a/test/properties/provider_properties.hpp b/test/properties/provider_properties.hpp new file mode 100644 index 0000000000..2f51ad947f --- /dev/null +++ b/test/properties/provider_properties.hpp @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +*/ + +#include +#include +#include +#include + +#include "base.hpp" +#include "test_helpers.h" + +using umf_test::test; + +using testParams = + std::tuple, + std::function, + const char *>; + +std::string nameGen(const testing::TestParamInfo param) { + return std::get<2>(param.param); +} + +struct ProviderPropsTest : umf_test::test, + ::testing::WithParamInterface { + void SetUp() override { + test::SetUp(); + + auto [create_fun, destroy_fun, name] = this->GetParam(); + provider_create = std::move(create_fun); + provider_destroy = std::move(destroy_fun); + (void)name; // unused + + provider_create(&provider, &data); + ASSERT_NE(provider, nullptr); + + umf_result_t umf_result = + umfPoolCreate(umfProxyPoolOps(), provider, nullptr, 0, &pool); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + } + + void TearDown() override { + umfPoolDestroy(pool); + provider_destroy(provider, data); + test::TearDown(); + } + + umf_memory_provider_handle_t provider; + umf_memory_pool_handle_t pool; + + std::function provider_create; + std::function provider_destroy; + void *data; +}; + +TEST_P(ProviderPropsTest, genericProps) { + umf_result_t umf_result; + const size_t alloc_size = 8; + + void *ptr = umfPoolMalloc(pool, alloc_size); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t props_handle = nullptr; + umf_result = umfGetMemoryPropertiesHandle(ptr, &props_handle); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + umf_memory_provider_handle_t param_provider = nullptr; + umf_result = + umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_PROVIDER_HANDLE, + ¶m_provider, sizeof(param_provider)); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_EQ(param_provider, provider); + + umf_memory_pool_handle_t 
param_pool = nullptr;
+    umf_result =
+        umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_POOL_HANDLE,
+                             &param_pool, sizeof(param_pool));
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(param_pool, pool);
+
+    void *base_address = nullptr;
+    umf_result =
+        umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS,
+                             &base_address, sizeof(base_address));
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(base_address, ptr);
+
+    size_t size = 0;
+    umf_result = umfGetMemoryProperty(
+        props_handle, UMF_MEMORY_PROPERTY_BASE_SIZE, &size, sizeof(size));
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(size, alloc_size);
+
+    uint64_t buffer_id = 0;
+    umf_result =
+        umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BUFFER_ID,
+                             &buffer_id, sizeof(buffer_id));
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_GE(buffer_id, 0);
+
+    umf_result = umfPoolFree(pool, ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+}
+
+TEST_P(ProviderPropsTest, baseAddressFromMiddle) {
+    umf_result_t umf_result;
+    const size_t alloc_size = 8;
+
+    void *ptr = umfPoolMalloc(pool, alloc_size);
+    ASSERT_NE(ptr, nullptr);
+
+    void *ptr_mid = (void *)((uintptr_t)ptr + (alloc_size / 2));
+    umf_memory_properties_handle_t props_handle = nullptr;
+    umf_result = umfGetMemoryPropertiesHandle(ptr_mid, &props_handle);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(props_handle, nullptr);
+
+    uintptr_t param_base_address = 0;
+    umf_result =
+        umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS,
+                             &param_base_address, sizeof(param_base_address));
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_EQ(param_base_address, (uintptr_t)ptr);
+
+    umf_result = umfPoolFree(pool, ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+}
+
+TEST_P(ProviderPropsTest, uniqueBufferId) {
+    size_t alloc_size = 8;
+    size_t num_allocs = 10;
+    umf_result_t umf_result;
+    std::set<uint64_t> buffer_ids;
+
+    for (size_t i = 0; i < num_allocs; ++i) {
+        void *ptr = umfPoolMalloc(pool, alloc_size);
+        ASSERT_NE(ptr, nullptr);
+
+        umf_memory_properties_handle_t props_handle = nullptr;
+        umf_result = umfGetMemoryPropertiesHandle(ptr, &props_handle);
+        ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+        ASSERT_NE(props_handle, nullptr);
+
+        uint64_t buffer_id = 0;
+        umf_result =
+            umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BUFFER_ID,
+                                 &buffer_id, sizeof(buffer_id));
+        ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+        ASSERT_GE(buffer_id, 0);
+
+        // Ensure that the buffer ID is unique by inserting it into a set and
+        // checking if it was already present
+        ASSERT_TRUE(buffer_ids.find(buffer_id) == buffer_ids.end());
+        ASSERT_TRUE(buffer_ids.insert(buffer_id).second);
+
+        umf_result = umfPoolFree(pool, ptr);
+        ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    }
+}
+
+// Negative tests
+
+TEST_P(ProviderPropsTest, invalidPointer) {
+    umf_memory_properties_handle_t props_handle = nullptr;
+    umf_result_t umf_result =
+        umfGetMemoryPropertiesHandle(nullptr, &props_handle);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(props_handle, nullptr);
+
+    uintptr_t invalid_ptr = 0xdeadbeef;
+    umf_result =
+        umfGetMemoryPropertiesHandle((void *)invalid_ptr, &props_handle);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(props_handle, nullptr);
+}
+
+TEST_P(ProviderPropsTest, invalidPropertyId) {
+    void *ptr = umfPoolMalloc(pool, 8);
+    ASSERT_NE(ptr, nullptr);
+
+    umf_memory_properties_handle_t props_handle = nullptr;
+    umf_result_t res = umfGetMemoryPropertiesHandle(ptr,
&props_handle); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + void *value = nullptr; + res = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_INVALID, + &value, sizeof(value)); + ASSERT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + res = umfPoolFree(pool, ptr); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); +} + +TEST_P(ProviderPropsTest, invalidPropertyValue) { + void *ptr = umfPoolMalloc(pool, 8); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t props_handle = nullptr; + umf_result_t res = umfGetMemoryPropertiesHandle(ptr, &props_handle); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + res = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + NULL, sizeof(int)); + ASSERT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + res = umfPoolFree(pool, ptr); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); +} + +TEST_P(ProviderPropsTest, invalidPropertySize) { + void *ptr = umfPoolMalloc(pool, 8); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t props_handle = nullptr; + umf_result_t res = umfGetMemoryPropertiesHandle(ptr, &props_handle); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + int value = 0; + res = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + &value, size_t(0)); + ASSERT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT); + + res = umfPoolFree(pool, ptr); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); +} + +TEST_P(ProviderPropsTest, nullPropertiesHandle) { + int val = 0; + umf_result_t res = umfGetMemoryProperty( + NULL, UMF_MEMORY_PROPERTY_BASE_ADDRESS, &val, sizeof(val)); + ASSERT_EQ(res, UMF_RESULT_ERROR_INVALID_ARGUMENT); +} diff --git a/test/properties/provider_properties_cuda.cpp b/test/properties/provider_properties_cuda.cpp new file mode 100644 index 0000000000..4ed5b8c02e --- /dev/null +++ b/test/properties/provider_properties_cuda.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +*/ + +#include "provider_properties.hpp" +#include "providers/cuda_helpers.h" + +void createCudaMemoryProvider(umf_memory_provider_handle_t *out_provider, + void *out_data) { + CUdevice hDevice = -1; + CUcontext hContext = NULL; + + int ret = init_cuda(); + ASSERT_EQ(ret, 0); + + ret = get_cuda_device(&hDevice); + ASSERT_EQ(ret, 0); + ASSERT_NE(hDevice, -1); + + ret = create_context(hDevice, &hContext); + ASSERT_EQ(ret, 0); + ASSERT_NE(hContext, nullptr); + + umf_cuda_memory_provider_params_handle_t cu_params = NULL; + umf_result_t umf_result = umfCUDAMemoryProviderParamsCreate(&cu_params); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(cu_params, nullptr); + + umf_result = umfCUDAMemoryProviderParamsSetContext(cu_params, hContext); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + + umf_result = umfCUDAMemoryProviderParamsSetDevice(cu_params, hDevice); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + + umf_result = umfCUDAMemoryProviderParamsSetMemoryType( + cu_params, UMF_MEMORY_TYPE_DEVICE); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t provider = nullptr; + umf_result = umfMemoryProviderCreate(umfCUDAMemoryProviderOps(), cu_params, + &provider); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + umfCUDAMemoryProviderParamsDestroy(cu_params); + + *out_provider = provider; + *(uintptr_t *)out_data = (uintptr_t)hContext; +} + +void destroyCudaMemoryProvider(umf_memory_provider_handle_t provider, + void *data) { + destroy_context((CUcontext)data); + umfMemoryProviderDestroy(provider); +} + +INSTANTIATE_TEST_SUITE_P(providerPropsTest, ProviderPropsTest, + ::testing::Values(testParams{createCudaMemoryProvider, + destroyCudaMemoryProvider, + "cudaMemoryProvider"}), + nameGen); diff --git a/test/properties/provider_properties_level_zero.cpp b/test/properties/provider_properties_level_zero.cpp new file mode 100644 index 0000000000..aa420cf500 --- /dev/null +++ b/test/properties/provider_properties_level_zero.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +*/ + +#include "provider_properties.hpp" +#include "utils/utils_level_zero.h" + +void levelZeroMemoryProviderCreate(umf_memory_provider_handle_t *out_provider, + void *out_data) { + + ze_driver_handle_t hDriver = nullptr; + ze_device_handle_t hDevice = nullptr; + ze_context_handle_t hContext = nullptr; + uint32_t driver_idx = 0; + + int ret = utils_ze_init_level_zero(); + ASSERT_EQ(ret, 0); + + ret = utils_ze_find_driver_with_gpu(&driver_idx, &hDriver); + ASSERT_EQ(ret, 0); + + ret = utils_ze_find_gpu_device(hDriver, &hDevice); + ASSERT_EQ(ret, 0); + + ret = utils_ze_create_context(hDriver, &hContext); + ASSERT_EQ(ret, 0); + + umf_level_zero_memory_provider_params_handle_t params = nullptr; + umf_result_t result = umfLevelZeroMemoryProviderParamsCreate(¶ms); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + result = umfLevelZeroMemoryProviderParamsSetContext(params, hContext); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + result = umfLevelZeroMemoryProviderParamsSetDevice(params, hDevice); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + result = umfLevelZeroMemoryProviderParamsSetMemoryType( + params, UMF_MEMORY_TYPE_DEVICE); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t provider = nullptr; + umf_result_t umf_result = umfMemoryProviderCreate( + umfLevelZeroMemoryProviderOps(), params, &provider); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + result = umfLevelZeroMemoryProviderParamsDestroy(params); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + + *out_provider = provider; + *(uintptr_t *)out_data = (uintptr_t)hContext; +} + +void levelZeroMemoryProviderDestroy(umf_memory_provider_handle_t provider, + void *data) { + umfMemoryProviderDestroy(provider); + utils_ze_destroy_context((ze_context_handle_t)data); +} + +INSTANTIATE_TEST_SUITE_P(providerPropsTest, ProviderPropsTest, + ::testing::Values(testParams{ + levelZeroMemoryProviderCreate, + levelZeroMemoryProviderDestroy, + "levelZeroProvider"}), + nameGen); diff --git a/test/provider_devdax_memory.cpp b/test/provider_devdax_memory.cpp index 31947fd3b0..b22f4fa291 100644 --- a/test/provider_devdax_memory.cpp +++ b/test/provider_devdax_memory.cpp @@ -3,21 +3,23 @@ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception #ifndef _WIN32 -#include "test_helpers_linux.h" #include #include #include #endif -#include "base.hpp" - -#include "test_helpers.h" -#include "utils/cpp_helpers.hpp" - #include #include #include +#include "base.hpp" +#include "provider.hpp" +#include "test_helpers.h" +#include "utils/cpp_helpers.hpp" +#ifndef _WIN32 +#include "test_helpers_linux.h" +#endif + using umf_test::test; #define INVALID_PTR ((void *)0x01) @@ -44,23 +46,6 @@ static int compare_native_error_str(const char *message, int error) { return strncmp(message, error_str, len); } -using providerCreateExtParams = - std::tuple; - -static void providerCreateExt(providerCreateExtParams params, - umf_test::provider_unique_handle_t *handle) { - umf_memory_provider_handle_t hProvider = nullptr; - auto [provider_ops, provider_params] = params; - - auto ret = - umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - ASSERT_NE(hProvider, nullptr); - - *handle = umf_test::provider_unique_handle_t(hProvider, - &umfMemoryProviderDestroy); -} - struct umfProviderTest : umf_test::test, ::testing::WithParamInterface { @@ -138,7 +123,7 @@ TEST_F(test, test_if_mapped_with_MAP_SYNC) { umf_result_t umf_result; char *path = 
getenv("UMF_TESTS_DEVDAX_PATH"); - if (path == nullptr || path[0] == 0) { + if (path == nullptr || path[0] == '\0') { GTEST_SKIP() << "Test skipped, UMF_TESTS_DEVDAX_PATH is not set"; } @@ -184,7 +169,8 @@ using devdax_params_unique_handle_t = devdax_params_unique_handle_t create_devdax_params() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (path == nullptr || path[0] == 0 || size == nullptr || size[0] == 0) { + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { return devdax_params_unique_handle_t( nullptr, &umfDevDaxMemoryProviderParamsDestroy); } @@ -212,7 +198,8 @@ static std::vector devdaxProviderTestParamsList = GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfProviderTest); INSTANTIATE_TEST_SUITE_P(devdaxProviderTest, umfProviderTest, - ::testing::ValuesIn(devdaxProviderTestParamsList)); + ::testing::ValuesIn(devdaxProviderTestParamsList), + providerCreateExtParamsNameGen); TEST_P(umfProviderTest, create_destroy) {} @@ -305,6 +292,36 @@ TEST_P(umfProviderTest, get_name) { ASSERT_STREQ(name, "DEVDAX"); } +TEST(DevDaxProviderName, custom_name) { + auto params_handle = create_devdax_params(); + if (!params_handle.get()) { + GTEST_SKIP() << "devdax params unavailable"; + } + + const char *custom = "my_devdax"; + auto ret = + umfDevDaxMemoryProviderParamsSetName(params_handle.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfDevDaxMemoryProviderOps(), + params_handle.get(), &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(DevDaxProviderName, default_name_null_handle) { + const char *name = nullptr; + EXPECT_EQ(umfDevDaxMemoryProviderOps()->get_name(nullptr, &name), + UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "DEVDAX"); +} + TEST_P(umfProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/provider_devdax_memory_ipc.cpp b/test/provider_devdax_memory_ipc.cpp index d88b1f0053..1db7011534 100644 --- a/test/provider_devdax_memory_ipc.cpp +++ b/test/provider_devdax_memory_ipc.cpp @@ -18,7 +18,8 @@ using umf_test::test; bool devDaxEnvSet() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (path == nullptr || path[0] == 0 || size == nullptr || size[0] == 0) { + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { return false; } @@ -28,7 +29,8 @@ bool devDaxEnvSet() { void *defaultDevDaxParamsCreate() { char *path = getenv("UMF_TESTS_DEVDAX_PATH"); char *size = getenv("UMF_TESTS_DEVDAX_SIZE"); - if (path == nullptr || path[0] == 0 || size == nullptr || size[0] == 0) { + if (path == nullptr || path[0] == '\0' || size == nullptr || + size[0] == '\0') { return nullptr; } @@ -77,4 +79,5 @@ static std::vector getIpcProxyPoolTestParamsList(void) { GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfIpcTest); INSTANTIATE_TEST_SUITE_P(DevDaxProviderDifferentPoolsTest, umfIpcTest, - ::testing::ValuesIn(getIpcProxyPoolTestParamsList())); + ::testing::ValuesIn(getIpcProxyPoolTestParamsList()), + ipcTestParamsNameGen); diff --git a/test/provider_file_memory.cpp b/test/provider_file_memory.cpp index f79dac8491..c59223eb5a 100644 --- a/test/provider_file_memory.cpp +++ 
b/test/provider_file_memory.cpp @@ -2,18 +2,18 @@ // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "base.hpp" +#include +#include +#include +#include "base.hpp" +#include "provider.hpp" #include "test_helpers.h" #include "utils/cpp_helpers.hpp" #ifndef _WIN32 #include "test_helpers_linux.h" #endif -#include -#include -#include - using umf_test::test; #define FILE_PATH ((char *)"tmp_file") @@ -40,23 +40,6 @@ static int compare_native_error_str(const char *message, int error) { return strncmp(message, error_str, len); } -using providerCreateExtParams = - std::tuple; - -static void providerCreateExt(providerCreateExtParams params, - umf_test::provider_unique_handle_t *handle) { - umf_memory_provider_handle_t hProvider = nullptr; - auto [provider_ops, provider_params] = params; - - auto ret = - umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - ASSERT_NE(hProvider, nullptr); - - *handle = umf_test::provider_unique_handle_t(hProvider, - &umfMemoryProviderDestroy); -} - struct FileProviderParamsDefault : umf_test::test, ::testing::WithParamInterface { @@ -136,7 +119,7 @@ TEST_F(test, test_if_mapped_with_MAP_SYNC) { umf_result_t umf_result; char *path = getenv("UMF_TESTS_FSDAX_PATH"); - if (path == nullptr || path[0] == 0) { + if (path == nullptr || path[0] == '\0') { GTEST_SKIP() << "Test skipped, UMF_TESTS_FSDAX_PATH is not set"; } @@ -219,7 +202,8 @@ file_params_unique_handle_t file_params_shared = INSTANTIATE_TEST_SUITE_P(fileProviderTest, FileProviderParamsDefault, ::testing::Values(providerCreateExtParams{ umfFileMemoryProviderOps(), - file_params_default.get()})); + file_params_default.get()}), + providerCreateExtParamsNameGen); TEST_P(FileProviderParamsDefault, create_destroy) {} @@ -370,6 +354,32 @@ TEST_P(FileProviderParamsDefault, get_name) { ASSERT_STREQ(name, "FILE"); } +TEST(FileProviderName, custom_name) { + auto params = get_file_params_default(FILE_PATH); + ASSERT_NE(params.get(), nullptr); + + const char *custom = "my_file"; + auto ret = umfFileMemoryProviderParamsSetName(params.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfFileMemoryProviderOps(), params.get(), + &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(FileProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfFileMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "FILE"); +} + TEST_P(FileProviderParamsDefault, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); @@ -531,7 +541,8 @@ TEST_P(FileProviderParamsDefault, purge_force_INVALID_POINTER) { INSTANTIATE_TEST_SUITE_P(fileProviderTest, FileProviderParamsShared, ::testing::Values(providerCreateExtParams{ umfFileMemoryProviderOps(), - file_params_shared.get()})); + file_params_shared.get()}), + providerCreateExtParamsNameGen); TEST_P(FileProviderParamsShared, IPC_base_success_test) { umf_result_t umf_result; diff --git a/test/provider_file_memory_ipc.cpp b/test/provider_file_memory_ipc.cpp index b749772f41..fe50d8408f 100644 --- a/test/provider_file_memory_ipc.cpp +++ b/test/provider_file_memory_ipc.cpp @@ 
-114,7 +114,7 @@ static std::vector getIpcFsDaxTestParamsList(void) { std::vector ipcFsDaxTestParamsList = {}; char *path = getenv("UMF_TESTS_FSDAX_PATH"); - if (path == nullptr || path[0] == 0) { + if (path == nullptr || path[0] == '\0') { // skipping the test, UMF_TESTS_FSDAX_PATH is not set return ipcFsDaxTestParamsList; } @@ -140,7 +140,9 @@ static std::vector getIpcFsDaxTestParamsList(void) { GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfIpcTest); INSTANTIATE_TEST_SUITE_P(FileProviderDifferentPoolsTest, umfIpcTest, - ::testing::ValuesIn(ipcManyPoolsTestParamsList)); + ::testing::ValuesIn(ipcManyPoolsTestParamsList), + ipcTestParamsNameGen); INSTANTIATE_TEST_SUITE_P(FileProviderDifferentPoolsFSDAXTest, umfIpcTest, - ::testing::ValuesIn(getIpcFsDaxTestParamsList())); + ::testing::ValuesIn(getIpcFsDaxTestParamsList()), + ipcTestParamsNameGen); diff --git a/test/provider_fixed_memory.cpp b/test/provider_fixed_memory.cpp index 59dcbb4d8a..a72deda63a 100644 --- a/test/provider_fixed_memory.cpp +++ b/test/provider_fixed_memory.cpp @@ -2,19 +2,19 @@ // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "base.hpp" +#include +#include +#include +#include +#include "base.hpp" +#include "provider.hpp" #include "test_helpers.h" #include "utils/cpp_helpers.hpp" #ifndef _WIN32 #include "test_helpers_linux.h" #endif -#include -#include -#include -#include - using umf_test::test; #define FIXED_BUFFER_SIZE (10 * utils_get_page_size()) @@ -39,23 +39,6 @@ static int compare_native_error_str(const char *message, int error) { return strncmp(message, error_str, len); } -using providerCreateExtParams = - std::tuple; - -static void providerCreateExt(providerCreateExtParams params, - umf_test::provider_unique_handle_t *handle) { - umf_memory_provider_handle_t hProvider = nullptr; - auto [provider_ops, provider_params] = params; - - auto ret = - umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - ASSERT_NE(hProvider, nullptr); - - *handle = umf_test::provider_unique_handle_t(hProvider, - &umfMemoryProviderDestroy); -} - struct FixedProviderTest : umf_test::test, ::testing::WithParamInterface { @@ -155,7 +138,8 @@ struct FixedProviderTest INSTANTIATE_TEST_SUITE_P(fixedProviderTest, FixedProviderTest, ::testing::Values(providerCreateExtParams{ - umfFixedMemoryProviderOps(), nullptr})); + umfFixedMemoryProviderOps(), nullptr}), + providerCreateExtParamsNameGen); TEST_P(FixedProviderTest, create_destroy) { // Creation and destruction are handled in SetUp and TearDown @@ -279,6 +263,40 @@ TEST_P(FixedProviderTest, get_name) { ASSERT_STREQ(name, "FIXED"); } +TEST(FixedProviderName, custom_name) { + size_t mem_size = utils_get_page_size(); + void *buffer = malloc(mem_size); + ASSERT_NE(buffer, nullptr); + + umf_fixed_memory_provider_params_handle_t params = nullptr; + auto ret = umfFixedMemoryProviderParamsCreate(buffer, mem_size, ¶ms); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *custom = "my_fixed"; + ret = umfFixedMemoryProviderParamsSetName(params, custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = umfMemoryProviderCreate(umfFixedMemoryProviderOps(), params, &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + + umfMemoryProviderDestroy(prov); + 
umfFixedMemoryProviderParamsDestroy(params); + free(buffer); +} + +TEST(FixedProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfFixedMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "FIXED"); +} + TEST_P(FixedProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); diff --git a/test/provider_os_memory.cpp b/test/provider_os_memory.cpp index c18148d620..49b023a741 100644 --- a/test/provider_os_memory.cpp +++ b/test/provider_os_memory.cpp @@ -2,19 +2,19 @@ // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "base.hpp" - -#include "ipcFixtures.hpp" -#include "test_helpers.h" -#include "utils/cpp_helpers.hpp" - +#include #include #include #include #ifdef UMF_POOL_JEMALLOC_ENABLED #include #endif -#include + +#include "base.hpp" +#include "ipcFixtures.hpp" +#include "provider.hpp" +#include "test_helpers.h" +#include "utils/cpp_helpers.hpp" using umf_test::test; @@ -45,23 +45,6 @@ static int compare_native_error_str(const char *message, int error) { return strncmp(message, error_str, len); } -using providerCreateExtParams = - std::tuple; - -static void providerCreateExt(providerCreateExtParams params, - umf_test::provider_unique_handle_t *handle) { - umf_memory_provider_handle_t hProvider = nullptr; - auto [provider_ops, provider_params] = params; - - auto ret = - umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - ASSERT_NE(hProvider, nullptr); - - *handle = umf_test::provider_unique_handle_t(hProvider, - &umfMemoryProviderDestroy); -} - struct umfProviderTest : umf_test::test, ::testing::WithParamInterface { @@ -219,9 +202,9 @@ TEST_F(test, create_ZERO_WEIGHT_PARTITION) { os_memory_provider_params, &p, 1); EXPECT_EQ(umf_result, UMF_RESULT_SUCCESS); - umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(), - &os_memory_provider_params, - &os_memory_provider); + umf_result = + umfMemoryProviderCreate(umfOsMemoryProviderOps(), + os_memory_provider_params, &os_memory_provider); umfOsMemoryProviderParamsDestroy(os_memory_provider_params); @@ -248,7 +231,8 @@ auto defaultParams = createOsMemoryProviderParams(); INSTANTIATE_TEST_SUITE_P(osProviderTest, umfProviderTest, ::testing::Values(providerCreateExtParams{ - umfOsMemoryProviderOps(), defaultParams.get()})); + umfOsMemoryProviderOps(), defaultParams.get()}), + providerCreateExtParamsNameGen); TEST_P(umfProviderTest, create_destroy) {} @@ -335,6 +319,32 @@ TEST_P(umfProviderTest, get_name) { ASSERT_STREQ(name, "OS"); } +TEST(OsProviderName, custom_name) { + auto params = createOsMemoryProviderParams(); + ASSERT_NE(params.get(), nullptr); + const char *custom = "my_os"; + auto ret = umfOsMemoryProviderParamsSetName(params.get(), custom); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t prov = nullptr; + ret = + umfMemoryProviderCreate(umfOsMemoryProviderOps(), params.get(), &prov); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + const char *name = nullptr; + ret = umfMemoryProviderGetName(prov, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); + umfMemoryProviderDestroy(prov); +} + +TEST(OsProviderName, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfOsMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "OS"); +} + 
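Note: the INSTANTIATE_TEST_SUITE_P changes throughout this patch all add a fourth argument (providerCreateExtParamsNameGen, ipcTestParamsNameGen, or an inline lambda) so GoogleTest emits readable parameterized test names instead of numeric indices. Below is a minimal sketch of such a name generator; the ExampleParams type and field are illustrative assumptions, not the helpers defined in this PR's shared test headers.

```cpp
#include <string>
#include <gtest/gtest.h>

// Hypothetical parameter type used only for illustration.
struct ExampleParams {
    const char *provider_name; // human-readable label for the test case
};

// GoogleTest calls this once per parameter to build the test-case suffix;
// the returned string may contain only alphanumerics and underscores.
static std::string exampleParamsNameGen(
    const ::testing::TestParamInfo<ExampleParams> &info) {
    return std::string(info.param.provider_name) + "_" +
           std::to_string(info.index);
}

// Usage with a hypothetical ExampleTest fixture:
// INSTANTIATE_TEST_SUITE_P(exampleSuite, ExampleTest,
//                          ::testing::Values(ExampleParams{"osProvider"}),
//                          exampleParamsNameGen);
```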
TEST_P(umfProviderTest, free_size_0_ptr_not_null) { umf_result_t umf_result = umfMemoryProviderFree(provider.get(), INVALID_PTR, 0); @@ -557,4 +567,5 @@ static std::vector ipcTestParamsList = { }; INSTANTIATE_TEST_SUITE_P(osProviderTest, umfIpcTest, - ::testing::ValuesIn(ipcTestParamsList)); + ::testing::ValuesIn(ipcTestParamsList), + ipcTestParamsNameGen); diff --git a/test/provider_os_memory_config.cpp b/test/provider_os_memory_config.cpp index ed34566182..e5e2fdf973 100644 --- a/test/provider_os_memory_config.cpp +++ b/test/provider_os_memory_config.cpp @@ -1,20 +1,20 @@ /* * - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation * * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception * */ +#include +#include + #include "base.hpp" #include #include -#include -#include - static constexpr size_t allocSize = 4096; struct providerConfigTest : testing::Test { @@ -238,11 +238,17 @@ struct providerConfigTestNumaMode } }; -INSTANTIATE_TEST_SUITE_P(numa_modes, providerConfigTestNumaMode, - testing::Values(UMF_NUMA_MODE_DEFAULT, - UMF_NUMA_MODE_BIND, - UMF_NUMA_MODE_INTERLEAVE, - UMF_NUMA_MODE_LOCAL)); +INSTANTIATE_TEST_SUITE_P( + numa_modes, providerConfigTestNumaMode, + testing::Values(UMF_NUMA_MODE_DEFAULT, UMF_NUMA_MODE_BIND, + UMF_NUMA_MODE_INTERLEAVE, UMF_NUMA_MODE_LOCAL), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_NUMA_MODE_DEFAULT", "UMF_NUMA_MODE_BIND", + "UMF_NUMA_MODE_INTERLEAVE", "UMF_NUMA_MODE_LOCAL"}; + return names[info.index]; + })); + #ifndef MPOL_LOCAL #define MPOL_LOCAL 4 #endif diff --git a/test/provider_os_memory_multiple_numa_nodes.cpp b/test/provider_os_memory_multiple_numa_nodes.cpp index cfc58f2f06..69db6759f3 100644 --- a/test/provider_os_memory_multiple_numa_nodes.cpp +++ b/test/provider_os_memory_multiple_numa_nodes.cpp @@ -1,11 +1,7 @@ -// Copyright (C) 2024 Intel Corporation +// Copyright (C) 2024-2025 Intel Corporation // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "base.hpp" -#include "numa_helpers.hpp" -#include "test_helpers.h" - #include #include #include @@ -14,6 +10,10 @@ #include +#include "base.hpp" +#include "numa_helpers.hpp" +#include "test_helpers.h" + std::vector get_available_numa_nodes() { if (numa_available() == -1 || numa_all_nodes_ptr == nullptr) { return std::vector(); @@ -147,7 +147,10 @@ using the macro) GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(testNumaOnEachNode); INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocations, testNumaOnEachNode, - ::testing::ValuesIn(get_available_numa_nodes())); + ::testing::ValuesIn(get_available_numa_nodes()), + ([](auto const &info) -> std::string { + return "numa_" + std::to_string(info.param); + })); // Test for allocations on numa nodes. It will be executed on each of // the available numa nodes. 
@@ -252,6 +255,7 @@ TEST_P(testNumaOnEachNode, checkModeInterleaveSingleNode) { constexpr int pages_num = 1024; size_t page_size = sysconf(_SC_PAGE_SIZE); + ASSERT_GT(page_size, 0); umf_result_t umf_result; umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; @@ -293,7 +297,10 @@ struct testNumaOnEachCpu : testNuma, testing::WithParamInterface { }; INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnEachCpu, - ::testing::ValuesIn(get_available_cpus())); + ::testing::ValuesIn(get_available_cpus()), + ([](auto const &info) -> std::string { + return "cpu_" + std::to_string(info.param); + })); // Test for allocation on numa node with mode preferred and an empty nodeset. // For the empty nodeset the memory is allocated on the node of the CPU that @@ -421,6 +428,7 @@ TEST_F(testNuma, checkModeDefault) { TEST_F(testNuma, checkModeInterleave) { constexpr int pages_num = 1024; size_t page_size = sysconf(_SC_PAGE_SIZE); + ASSERT_GT(page_size, 0); umf_result_t umf_result; umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr; diff --git a/test/provider_os_memory_not_impl.cpp b/test/provider_os_memory_not_impl.cpp deleted file mode 100644 index 127ba32e48..0000000000 --- a/test/provider_os_memory_not_impl.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2024-2025 Intel Corporation -// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -#include "base.hpp" - -#include - -using umf_test::test; - -TEST_F(test, os_provider_not_implemented) { - umf_os_memory_provider_params_handle_t params = nullptr; - umf_result_t umf_result = umfOsMemoryProviderParamsCreate(¶ms); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - EXPECT_EQ(params, nullptr); - - umf_result = umfOsMemoryProviderParamsDestroy(params); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = umfOsMemoryProviderParamsSetProtection(params, 0); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = - umfOsMemoryProviderParamsSetVisibility(params, UMF_MEM_MAP_PRIVATE); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = umfOsMemoryProviderParamsSetShmName(params, "shm_name"); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = umfOsMemoryProviderParamsSetNumaList(params, nullptr, 0); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = - umfOsMemoryProviderParamsSetNumaMode(params, UMF_NUMA_MODE_DEFAULT); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_result = umfOsMemoryProviderParamsSetPartSize(params, 4096); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - umf_numa_split_partition_t partitions[1]; - umf_result = umfOsMemoryProviderParamsSetPartitions(params, partitions, 1); - EXPECT_EQ(umf_result, UMF_RESULT_ERROR_NOT_SUPPORTED); - - const umf_memory_provider_ops_t *ops = umfOsMemoryProviderOps(); - EXPECT_EQ(ops, nullptr); -} diff --git a/test/provider_tracking.cpp b/test/provider_tracking.cpp index db186e15f4..52d5eeb8fd 100644 --- a/test/provider_tracking.cpp +++ b/test/provider_tracking.cpp @@ -2,40 +2,23 @@ // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -#include "base.hpp" +#include +#include +#include +#include "base.hpp" +#include "provider.hpp" #include "test_helpers.h" #include "utils/cpp_helpers.hpp" #ifndef _WIN32 #include "test_helpers_linux.h" #endif -#include -#include -#include - using umf_test::test; #define FIXED_BUFFER_SIZE (512 * utils_get_page_size()) #define INVALID_PTR ((void *)0x01) -using providerCreateExtParams = - std::tuple; - -static void providerCreateExt(providerCreateExtParams params, - umf_test::provider_unique_handle_t *handle) { - umf_memory_provider_handle_t hProvider = nullptr; - auto [provider_ops, provider_params] = params; - - auto ret = - umfMemoryProviderCreate(provider_ops, provider_params, &hProvider); - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); - ASSERT_NE(hProvider, nullptr); - - *handle = umf_test::provider_unique_handle_t(hProvider, - &umfMemoryProviderDestroy); -} - struct TrackingProviderTest : umf_test::test, ::testing::WithParamInterface { @@ -121,7 +104,8 @@ createPoolFromAllocation(void *ptr0, size_t size1, INSTANTIATE_TEST_SUITE_P(trackingProviderTest, TrackingProviderTest, ::testing::Values(providerCreateExtParams{ - umfFixedMemoryProviderOps(), nullptr})); + umfFixedMemoryProviderOps(), nullptr}), + providerCreateExtParamsNameGen); TEST_P(TrackingProviderTest, create_destroy) { // Creation and destruction are handled in SetUp and TearDown diff --git a/test/provider_tracking_fixture_tests.cpp b/test/provider_tracking_fixture_tests.cpp index 5a26f77907..60ff9a1761 100644 --- a/test/provider_tracking_fixture_tests.cpp +++ b/test/provider_tracking_fixture_tests.cpp @@ -92,10 +92,12 @@ INSTANTIATE_TEST_SUITE_P(TrackingProviderPoolTest, umfPoolTest, ::testing::Values(poolCreateExtParams{ umfProxyPoolOps(), nullptr, nullptr, &PROVIDER_FROM_POOL_OPS, - providerFromPoolParamsCreate, nullptr})); + providerFromPoolParamsCreate, nullptr}), + poolCreateExtParamsNameGen); INSTANTIATE_TEST_SUITE_P(TrackingProviderMultiPoolTest, umfMultiPoolTest, ::testing::Values(poolCreateExtParams{ umfProxyPoolOps(), nullptr, nullptr, &PROVIDER_FROM_POOL_OPS, - providerFromPoolParamsCreate, nullptr})); + providerFromPoolParamsCreate, nullptr}), + poolCreateExtParamsNameGen); diff --git a/test/providers/ipc_cuda_prov.py b/test/providers/ipc_cuda_prov.py new file mode 100755 index 0000000000..9200278cd8 --- /dev/null +++ b/test/providers/ipc_cuda_prov.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 Intel Corporation +# +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# + +import os +import sys +import time +import subprocess # nosec B404 +import platform + + +def main(): + # Port should be a number from the range <1024, 65535> + # Use PROCESS_ID environment variable if set, otherwise use current PID + pid = int(os.environ.get("PROCESS_ID", os.getpid())) + port = 1024 + (pid % (65535 - 1024)) + + # Set UMF_LOG environment variable + os.environ["UMF_LOG"] = "level:debug;flush:debug;output:stderr;pid:yes" + + build_type = os.environ.get("BUILD_TYPE", "Debug") + + # Determine executable extension based on platform + exe_ext = ".exe" if platform.system() == "Windows" else "" + + print(f"Starting test_ipc_cuda_prov CONSUMER on port {port} ...") + + # Start consumer process + consumer_cmd = [f"./{build_type}/test_ipc_cuda_prov_consumer{exe_ext}", str(port)] + with open("consumer_log.txt", "w") as consumer_log: + consumer_proc = subprocess.Popen( # nosec + consumer_cmd, stdout=consumer_log, stderr=subprocess.STDOUT + ) + + print("Waiting 5 sec ...") + time.sleep(5) + + print(f"Starting test_ipc_cuda_prov PRODUCER on port {port} ...") + + # Start producer process + producer_cmd = [f"./{build_type}/test_ipc_cuda_prov_producer{exe_ext}", str(port)] + with open("producer_log.txt", "w") as producer_log: + producer_proc = subprocess.Popen( # nosec + producer_cmd, stdout=producer_log, stderr=subprocess.STDOUT + ) + + print("Waiting 10 sec for the consumer and producer to finish ...") + time.sleep(10) + + # Wait for processes to complete + consumer_proc.wait() + producer_proc.wait() + + print("Test finished") + + # Display consumer log + print("Consumer log:") + try: + with open("consumer_log.txt", "r") as f: + print(f.read()) + except FileNotFoundError: + print("consumer_log.txt not found") + + # Display producer log + print("Producer log:") + try: + with open("producer_log.txt", "r") as f: + print(f.read()) + except FileNotFoundError: + print("producer_log.txt not found") + + # Check for errors in logs + error_found = False + for log_file in ["consumer_log.txt", "producer_log.txt"]: + try: + with open(log_file, "r") as f: + content = f.read().upper() + if "ERROR" in content or "FATAL" in content: + error_found = True + break + except FileNotFoundError: + continue + + if error_found: + print("Test failed: ERROR or FATAL found in logs.") + sys.exit(1) + + print("Test passed: No errors found in logs.") + + +if __name__ == "__main__": + main() diff --git a/test/providers/ipc_level_zero_prov.py b/test/providers/ipc_level_zero_prov.py new file mode 100755 index 0000000000..b08bf5bdf8 --- /dev/null +++ b/test/providers/ipc_level_zero_prov.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 Intel Corporation +# +# Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# + +import os +import sys +import time +import subprocess # nosec B404 +import platform + + +def main(): + # Port should be a number from the range <1024, 65535> + # Use PROCESS_ID environment variable if set, otherwise use current PID + pid = int(os.environ.get("PROCESS_ID", os.getpid())) + port = 1024 + (pid % (65535 - 1024)) + + # Set UMF_LOG environment variable + os.environ["UMF_LOG"] = "level:debug;flush:debug;output:stderr;pid:yes" + + build_type = os.environ.get("BUILD_TYPE", "Debug") + + # Determine executable extension based on platform + exe_ext = ".exe" if platform.system() == "Windows" else "" + + print(f"Starting test_ipc_level_zero_prov CONSUMER on port {port} ...") + + # Start consumer process + consumer_cmd = [ + f"./{build_type}/test_ipc_level_zero_prov_consumer{exe_ext}", + str(port), + ] + with open("consumer_log.txt", "w") as consumer_log: + consumer_proc = subprocess.Popen( # nosec + consumer_cmd, stdout=consumer_log, stderr=subprocess.STDOUT + ) + + print("Waiting 5 sec ...") + time.sleep(5) + + print(f"Starting test_ipc_level_zero_prov PRODUCER on port {port} ...") + + # Start producer process + producer_cmd = [ + f"./{build_type}/test_ipc_level_zero_prov_producer{exe_ext}", + str(port), + ] + with open("producer_log.txt", "w") as producer_log: + producer_proc = subprocess.Popen( # nosec + producer_cmd, stdout=producer_log, stderr=subprocess.STDOUT + ) + + print("Waiting 10 sec for the consumer and producer to finish ...") + time.sleep(10) + + # Wait for processes to complete + consumer_proc.wait() + producer_proc.wait() + + print("Test finished") + + # Display consumer log + print("Consumer log:") + try: + with open("consumer_log.txt", "r") as f: + print(f.read()) + except FileNotFoundError: + print("consumer_log.txt not found") + + # Display producer log + print("Producer log:") + try: + with open("producer_log.txt", "r") as f: + print(f.read()) + except FileNotFoundError: + print("producer_log.txt not found") + + # Check for errors in logs + error_found = False + for log_file in ["consumer_log.txt", "producer_log.txt"]: + try: + with open(log_file, "r") as f: + content = f.read().upper() + if "ERROR" in content or "FATAL" in content: + error_found = True + break + except FileNotFoundError: + continue + + if error_found: + print("Test failed: ERROR or FATAL found in logs.") + sys.exit(1) + + print("Test passed: No errors found in logs.") + + +if __name__ == "__main__": + main() diff --git a/test/providers/ipc_level_zero_prov_consumer.c b/test/providers/ipc_level_zero_prov_consumer.c index 5fb2128815..abe08e7c82 100644 --- a/test/providers/ipc_level_zero_prov_consumer.c +++ b/test/providers/ipc_level_zero_prov_consumer.c @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -88,6 +89,15 @@ int main(int argc, char *argv[]) { goto destroy_provider_params; } +#ifdef _WIN32 + // NOTE: On Windows, we must use the import / export memory exchange policy + // because IPC currently does not work + int use_import_export_for_IPC = 1; + umfCtlSet( + "umf.provider.default.LEVEL_ZERO.params.use_import_export_for_IPC", + &use_import_export_for_IPC, sizeof(use_import_export_for_IPC)); +#endif + umf_disjoint_pool_params_handle_t pool_params = NULL; umf_result = umfDisjointPoolParamsCreate(&pool_params); diff --git a/test/providers/ipc_level_zero_prov_producer.c b/test/providers/ipc_level_zero_prov_producer.c index e6ffcf2ed6..535020eaeb 100644 --- a/test/providers/ipc_level_zero_prov_producer.c +++ 
b/test/providers/ipc_level_zero_prov_producer.c @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -88,6 +89,15 @@ int main(int argc, char *argv[]) { goto destroy_provider_params; } +#ifdef _WIN32 + // NOTE: On Windows, we must use the import/export memory exchange policy + // because IPC currently does not work + int use_import_export_for_IPC = 1; + umfCtlSet( + "umf.provider.default.LEVEL_ZERO.params.use_import_export_for_IPC", + &use_import_export_for_IPC, sizeof(use_import_export_for_IPC)); +#endif + umf_disjoint_pool_params_handle_t pool_params = NULL; umf_result = umfDisjointPoolParamsCreate(&pool_params); diff --git a/test/providers/provider_cuda.cpp b/test/providers/provider_cuda.cpp index fd41603320..8a3d729eb0 100644 --- a/test/providers/provider_cuda.cpp +++ b/test/providers/provider_cuda.cpp @@ -9,13 +9,14 @@ #include +#include +#include #include #include "cuda_helpers.h" #include "ipcFixtures.hpp" #include "pool.hpp" #include "utils_load_library.h" -#include using umf_test::test; using namespace umf_test; @@ -107,7 +108,7 @@ class CUDAMemoryAccessor : public MemoryAccessor { : hDevice_(hDevice), hContext_(hContext) {} void fill(void *ptr, size_t size, const void *pattern, - size_t pattern_size) { + size_t pattern_size) override { ASSERT_NE(hContext_, nullptr); ASSERT_GE(hDevice_, -1); ASSERT_NE(ptr, nullptr); @@ -117,7 +118,7 @@ class CUDAMemoryAccessor : public MemoryAccessor { ASSERT_EQ(ret, 0); } - void copy(void *dst_ptr, void *src_ptr, size_t size) { + void copy(void *dst_ptr, void *src_ptr, size_t size) override { ASSERT_NE(hContext_, nullptr); ASSERT_GE(hDevice_, -1); ASSERT_NE(dst_ptr, nullptr); @@ -127,6 +128,8 @@ class CUDAMemoryAccessor : public MemoryAccessor { ASSERT_EQ(ret, 0); } + const char *getName() override { return "CUDAMemoryAccessor"; } + private: CUdevice hDevice_; CUcontext hContext_; @@ -332,10 +335,34 @@ TEST_P(umfCUDAProviderTest, ctl_stats) { sizeof(peak), provider); ASSERT_EQ(ret, UMF_RESULT_SUCCESS); ASSERT_EQ(peak, 0u); + umfMemoryProviderDestroy(provider); +} + +TEST_P(umfCUDAProviderTest, custom_name) { + const char *custom = "my_cuda"; + ASSERT_EQ(umfCUDAMemoryProviderParamsSetName(params, custom), + UMF_RESULT_SUCCESS); + umf_memory_provider_handle_t provider = nullptr; + umf_result_t res = + umfMemoryProviderCreate(umfCUDAMemoryProviderOps(), params, &provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + const char *name = nullptr; + res = umfMemoryProviderGetName(provider, &name); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); umfMemoryProviderDestroy(provider); } +TEST(umfCUDAProviderOps, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfCUDAMemoryProviderOps()->get_name(nullptr, &name); + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "CUDA"); +} + TEST_P(umfCUDAProviderTest, allocInvalidSize) { CUcontext expected_current_context = get_current_context(); // create CUDA provider @@ -556,6 +583,87 @@ TEST_P(umfCUDAProviderTest, multiContext) { ASSERT_EQ(ret, 0); } +TEST_P(umfCUDAProviderTest, memProps) { + umf_memory_provider_handle_t provider = nullptr; + umf_result_t umf_result = + umfMemoryProviderCreate(umfCUDAMemoryProviderOps(), params, &provider); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + umf_memory_pool_handle_t pool = NULL; + umf_result = umfPoolCreate(umfProxyPoolOps(), provider, NULL, 0, &pool); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + + size_t size = 1024; + void *ptr = 
umfPoolMalloc(pool, size); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t props_handle = NULL; + umf_result_t result = umfGetMemoryPropertiesHandle(ptr, &props_handle); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + umf_usm_memory_type_t type = UMF_MEMORY_TYPE_UNKNOWN; + result = umfGetMemoryProperty( + props_handle, UMF_MEMORY_PROPERTY_POINTER_TYPE, &type, sizeof(type)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(type, expected_memory_type); + + void *baseAddress = nullptr; + result = + umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + &baseAddress, sizeof(baseAddress)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(baseAddress, ptr); + + size_t baseSize = 0; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_SIZE, + &baseSize, sizeof(baseSize)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_GE(baseSize, size); + + int64_t bufferId = 0; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BUFFER_ID, + &bufferId, sizeof(bufferId)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_GE(bufferId, 0); + + if (expected_memory_type != UMF_MEMORY_TYPE_HOST) { + CUdevice device = -1; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_DEVICE, + &device, sizeof(device)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(device, cudaTestHelper.get_test_device()); + } + + CUcontext context = nullptr; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_CONTEXT, + &context, sizeof(context)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(context, cudaTestHelper.get_test_context()); + + // check the props of pointer from the middle of alloc + void *midPtr = static_cast(ptr) + size / 2; + result = umfGetMemoryPropertiesHandle(midPtr, &props_handle); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + result = umfGetMemoryProperty( + props_handle, UMF_MEMORY_PROPERTY_POINTER_TYPE, &type, sizeof(type)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(type, expected_memory_type); + + result = + umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + &baseAddress, sizeof(baseAddress)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(baseAddress, ptr); + + umfFree(ptr); + + umfPoolDestroy(pool); + umfMemoryProviderDestroy(provider); +} + struct umfCUDAProviderAllocFlagsTest : umf_test::test, ::testing::WithParamInterface< @@ -668,9 +776,16 @@ TEST_P(umfCUDAProviderAllocFlagsTest, reuseParams) { // TODO add tests that mixes CUDA Memory Provider and Disjoint Pool INSTANTIATE_TEST_SUITE_P(umfCUDAProviderTestSuite, umfCUDAProviderTest, - ::testing::Values(UMF_MEMORY_TYPE_DEVICE, + ::testing::Values(UMF_MEMORY_TYPE_HOST, UMF_MEMORY_TYPE_SHARED, - UMF_MEMORY_TYPE_HOST)); + UMF_MEMORY_TYPE_DEVICE), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_MEMORY_TYPE_HOST", + "UMF_MEMORY_TYPE_SHARED", + "UMF_MEMORY_TYPE_DEVICE"}; + return names[info.index]; + })); INSTANTIATE_TEST_SUITE_P( umfCUDAProviderAllocFlagsTestSuite, umfCUDAProviderAllocFlagsTest, @@ -679,7 +794,13 @@ INSTANTIATE_TEST_SUITE_P( std::make_tuple(UMF_MEMORY_TYPE_SHARED, CU_MEM_ATTACH_HOST), std::make_tuple(UMF_MEMORY_TYPE_HOST, CU_MEMHOSTALLOC_PORTABLE), std::make_tuple(UMF_MEMORY_TYPE_HOST, CU_MEMHOSTALLOC_DEVICEMAP), - std::make_tuple(UMF_MEMORY_TYPE_HOST, CU_MEMHOSTALLOC_WRITECOMBINED))); + std::make_tuple(UMF_MEMORY_TYPE_HOST, CU_MEMHOSTALLOC_WRITECOMBINED)), + ([](auto const &info) -> std::string { + static 
const char *names[] = {"SHARED_GLOBAL", "SHARED_HOST", + "HOST_PORTABLE", "HOST_DEVICEMAP", + "HOST_WRITECOMBINED"}; + return names[info.index]; + })); // TODO: add IPC API GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfIpcTest); diff --git a/test/providers/provider_level_zero.cpp b/test/providers/provider_level_zero.cpp index d56b3dced5..1894d0aa4e 100644 --- a/test/providers/provider_level_zero.cpp +++ b/test/providers/provider_level_zero.cpp @@ -9,13 +9,14 @@ #include +#include +#include #include #include "ipcFixtures.hpp" #include "pool.hpp" #include "utils_level_zero.h" #include "utils_load_library.h" -#include using umf_test::test; using namespace umf_test; @@ -114,7 +115,14 @@ struct LevelZeroProviderInit INSTANTIATE_TEST_SUITE_P(, LevelZeroProviderInit, ::testing::Values(UMF_MEMORY_TYPE_HOST, UMF_MEMORY_TYPE_DEVICE, - UMF_MEMORY_TYPE_SHARED)); + UMF_MEMORY_TYPE_SHARED), + ([](auto const &info) -> std::string { + static const char *names[] = { + "UMF_MEMORY_TYPE_HOST", + "UMF_MEMORY_TYPE_DEVICE", + "UMF_MEMORY_TYPE_SHARED"}; + return names[info.index]; + })); TEST_P(LevelZeroProviderInit, FailNullContext) { const umf_memory_provider_ops_t *ops = umfLevelZeroMemoryProviderOps(); @@ -196,28 +204,44 @@ TEST_F(LevelZeroProviderInit, FailNonNullDevice) { umfLevelZeroMemoryProviderParamsDestroy(hParams); } -TEST_F(test, FailMismatchedResidentHandlesCount) { +static void +invalidResidentDevicesHandlesTestHelper(ze_device_handle_t *hDevices, + uint32_t deviceCount) { const umf_memory_provider_ops_t *ops = umfLevelZeroMemoryProviderOps(); ASSERT_NE(ops, nullptr); umf_level_zero_memory_provider_params_handle_t hParams = nullptr; - umf_result_t result = umfLevelZeroMemoryProviderParamsCreate(&hParams); - ASSERT_EQ(result, UMF_RESULT_SUCCESS); + const umf_result_t create_result = + umfLevelZeroMemoryProviderParamsCreate(&hParams); + ASSERT_EQ(create_result, UMF_RESULT_SUCCESS); - result = umfLevelZeroMemoryProviderParamsSetResidentDevices(hParams, - nullptr, 99); - ASSERT_EQ(result, UMF_RESULT_ERROR_INVALID_ARGUMENT); + const umf_result_t set_resident_result = + umfLevelZeroMemoryProviderParamsSetResidentDevices(hParams, hDevices, + deviceCount); + ASSERT_EQ(set_resident_result, UMF_RESULT_ERROR_INVALID_ARGUMENT); umfLevelZeroMemoryProviderParamsDestroy(hParams); } +TEST_F(test, FailMismatchedResidentHandlesCount) { + invalidResidentDevicesHandlesTestHelper(nullptr, 99); +} + +TEST_F(test, FailRedundantResidentDeviceHandles) { + std::vector hDevices{ + reinterpret_cast(0x100), + reinterpret_cast(0x101), + reinterpret_cast(0x100)}; + invalidResidentDevicesHandlesTestHelper(hDevices.data(), 3); +} + class LevelZeroMemoryAccessor : public MemoryAccessor { public: LevelZeroMemoryAccessor(ze_context_handle_t hContext, ze_device_handle_t hDevice) : hDevice_(hDevice), hContext_(hContext) {} void fill(void *ptr, size_t size, const void *pattern, - size_t pattern_size) { + size_t pattern_size) override { ASSERT_NE(ptr, nullptr); int ret = utils_ze_level_zero_fill(hContext_, hDevice_, ptr, size, @@ -225,7 +249,7 @@ class LevelZeroMemoryAccessor : public MemoryAccessor { ASSERT_EQ(ret, 0); } - void copy(void *dst_ptr, void *src_ptr, size_t size) { + void copy(void *dst_ptr, void *src_ptr, size_t size) override { ASSERT_NE(dst_ptr, nullptr); ASSERT_NE(src_ptr, nullptr); @@ -234,6 +258,8 @@ class LevelZeroMemoryAccessor : public MemoryAccessor { ASSERT_EQ(ret, 0); } + const char *getName() override { return "LevelZeroMemoryAccessor"; } + private: ze_device_handle_t hDevice_; ze_context_handle_t hContext_; 
@@ -247,6 +273,7 @@ struct umfLevelZeroProviderTest test::SetUp(); umf_usm_memory_type_t memory_type = this->GetParam(); + umfExpectedMemoryType = memory_type; params = nullptr; memAccessor = nullptr; @@ -299,6 +326,7 @@ struct umfLevelZeroProviderTest std::unique_ptr memAccessor = nullptr; ze_context_handle_t hContext = nullptr; ze_memory_type_t zeMemoryTypeExpected = ZE_MEMORY_TYPE_UNKNOWN; + umf_usm_memory_type_t umfExpectedMemoryType = UMF_MEMORY_TYPE_UNKNOWN; }; GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(umfLevelZeroProviderTest); @@ -445,10 +473,70 @@ TEST_P(umfLevelZeroProviderTest, ctl_stats) { sizeof(peak), provider); ASSERT_EQ(ret, UMF_RESULT_SUCCESS); ASSERT_EQ(peak, 0u); + umfMemoryProviderDestroy(provider); +} +TEST_P(umfLevelZeroProviderTest, ctl_use_import_export_for_IPC) { + umf_memory_provider_handle_t provider = nullptr; + umf_result_t ret = umfMemoryProviderCreate(umfLevelZeroMemoryProviderOps(), + params, &provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + // Test reading the default value (0 = IPC) + int use_import_export_for_IPC = 1; // Set to invalid value first + ret = + umfCtlGet("umf.provider.by_handle.{}.params.use_import_export_for_IPC", + &use_import_export_for_IPC, sizeof(use_import_export_for_IPC), + provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(use_import_export_for_IPC, 0); // Default is IPC (0) + + // Test writing a new value (1 = import/export) + int new_policy = 1; + ret = + umfCtlSet("umf.provider.by_handle.{}.params.use_import_export_for_IPC", + &new_policy, sizeof(new_policy), provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + + // Test reading the updated value + use_import_export_for_IPC = 0; // Set to different value first + ret = + umfCtlGet("umf.provider.by_handle.{}.params.use_import_export_for_IPC", + &use_import_export_for_IPC, sizeof(use_import_export_for_IPC), + provider); + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); + ASSERT_EQ(use_import_export_for_IPC, 1); // Should be import/export (1) + + umfMemoryProviderDestroy(provider); +} + +TEST_P(umfLevelZeroProviderTest, custom_name) { + const char *custom = "my_level_zero"; + ASSERT_EQ(umfLevelZeroMemoryProviderParamsSetName(params, custom), + UMF_RESULT_SUCCESS); + + umf_memory_provider_handle_t provider = nullptr; + umf_result_t res = umfMemoryProviderCreate(umfLevelZeroMemoryProviderOps(), + params, &provider); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + const char *name = nullptr; + res = umfMemoryProviderGetName(provider, &name); + ASSERT_EQ(res, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, custom); umfMemoryProviderDestroy(provider); } +TEST(umfLevelZeroProviderOps, default_name_null_handle) { + const char *name = nullptr; + auto ret = umfLevelZeroMemoryProviderOps()->get_name(nullptr, &name); + + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); + EXPECT_STREQ(name, "LEVEL_ZERO"); +} + TEST_P(umfLevelZeroProviderTest, allocInvalidSize) { umf_memory_provider_handle_t provider = nullptr; umf_result_t umf_result = umfMemoryProviderCreate( @@ -544,13 +632,100 @@ TEST_P(umfLevelZeroProviderTest, setDeviceOrdinalValid) { } } +TEST_P(umfLevelZeroProviderTest, memProps) { + umf_memory_provider_handle_t provider = nullptr; + umf_result_t umf_result = umfMemoryProviderCreate( + umfLevelZeroMemoryProviderOps(), params, &provider); + ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + ASSERT_NE(provider, nullptr); + + umf_memory_pool_handle_t pool = NULL; + umf_result = umfPoolCreate(umfProxyPoolOps(), provider, NULL, 0, &pool); + 
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS); + + size_t size = 1024; + void *ptr = umfPoolMalloc(pool, size); + ASSERT_NE(ptr, nullptr); + + umf_memory_properties_handle_t props_handle = NULL; + umf_result_t result = umfGetMemoryPropertiesHandle(ptr, &props_handle); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + + umf_usm_memory_type_t type = UMF_MEMORY_TYPE_UNKNOWN; + result = umfGetMemoryProperty( + props_handle, UMF_MEMORY_PROPERTY_POINTER_TYPE, &type, sizeof(type)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(type, umfExpectedMemoryType); + + // base address and size + void *baseAddress = nullptr; + result = + umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + &baseAddress, sizeof(baseAddress)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(baseAddress, ptr); + + size_t baseSize = 0; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_SIZE, + &baseSize, sizeof(baseSize)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_GE(baseSize, size); + + int64_t bufferId = 0; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BUFFER_ID, + &bufferId, sizeof(bufferId)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_GE(bufferId, 0); + + if (umfExpectedMemoryType != UMF_MEMORY_TYPE_HOST) { + ze_device_handle_t device = nullptr; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_DEVICE, + &device, sizeof(device)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(device, l0TestHelper.get_test_device()); + } + + ze_context_handle_t context = nullptr; + result = umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_CONTEXT, + &context, sizeof(context)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(context, l0TestHelper.get_test_context()); + + // check the props of pointer from the middle of alloc + void *midPtr = static_cast(ptr) + size / 2; + result = umfGetMemoryPropertiesHandle(midPtr, &props_handle); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_NE(props_handle, nullptr); + result = umfGetMemoryProperty( + props_handle, UMF_MEMORY_PROPERTY_POINTER_TYPE, &type, sizeof(type)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(type, umfExpectedMemoryType); + + result = + umfGetMemoryProperty(props_handle, UMF_MEMORY_PROPERTY_BASE_ADDRESS, + &baseAddress, sizeof(baseAddress)); + ASSERT_EQ(result, UMF_RESULT_SUCCESS); + ASSERT_EQ(baseAddress, ptr); + + umfFree(ptr); + + umfPoolDestroy(pool); + umfMemoryProviderDestroy(provider); +} + // TODO add tests that mixes Level Zero Memory Provider and Disjoint Pool -INSTANTIATE_TEST_SUITE_P(umfLevelZeroProviderTestSuite, - umfLevelZeroProviderTest, - ::testing::Values(UMF_MEMORY_TYPE_DEVICE, - UMF_MEMORY_TYPE_SHARED, - UMF_MEMORY_TYPE_HOST)); +INSTANTIATE_TEST_SUITE_P( + umfLevelZeroProviderTestSuite, umfLevelZeroProviderTest, + ::testing::Values(UMF_MEMORY_TYPE_HOST, UMF_MEMORY_TYPE_SHARED, + UMF_MEMORY_TYPE_DEVICE), + ([](auto const &info) -> std::string { + static const char *names[] = {"UMF_MEMORY_TYPE_HOST", + "UMF_MEMORY_TYPE_SHARED", + "UMF_MEMORY_TYPE_DEVICE"}; + return names[info.index]; + })); LevelZeroTestHelper l0TestHelper; @@ -572,5 +747,6 @@ INSTANTIATE_TEST_SUITE_P( umfLevelZeroProviderTestSuite, umfIpcTest, ::testing::Values(ipcTestParams{ umfProxyPoolOps(), nullptr, nullptr, umfLevelZeroMemoryProviderOps(), - createL0ParamsDeviceMemory, destroyL0Params, &l0Accessor})); + createL0ParamsDeviceMemory, destroyL0Params, &l0Accessor}), + ipcTestParamsNameGen); #endif diff --git 
a/test/providers/provider_level_zero_residency.cpp b/test/providers/provider_level_zero_residency.cpp new file mode 100644 index 0000000000..88f761455d --- /dev/null +++ b/test/providers/provider_level_zero_residency.cpp @@ -0,0 +1,94 @@ +// Copyright (C) 2025 Intel Corporation +// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "../common/level_zero_mocks.h" +#include "utils_level_zero.h" +#include "utils_log.h" + +#include "gtest/gtest.h" + +using namespace testing; + +class LevelZeroResidencyTestFixture : public Test { + protected: + StrictMock l0mock; + umf_memory_provider_handle_t provider = nullptr; + const ze_device_handle_t OUR_DEVICE; + + LevelZeroResidencyTestFixture() + : OUR_DEVICE(TestCreatePointer(777)) { + *MockedLevelZeroTestEnvironment::l0interface = &l0mock; + } + + void SetUp() override {} + void TearDown() override { + Mock::VerifyAndClearExpectations(&l0mock); + umfMemoryProviderDestroy(provider); + } +}; + +TEST_F(LevelZeroResidencyTestFixture, addNonexistingDeviceShouldSucceed) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_2, + true), + UMF_RESULT_SUCCESS); +} + +TEST_F(LevelZeroResidencyTestFixture, addExistingDeviceShouldFail) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_5, + true), + UMF_RESULT_ERROR_INVALID_ARGUMENT); +} + +TEST_F(LevelZeroResidencyTestFixture, removeNonexistingDeviceShouldFail) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_0, + false), + UMF_RESULT_ERROR_INVALID_ARGUMENT); +} + +TEST_F(LevelZeroResidencyTestFixture, removeExistingDeviceShouldSucceed) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_1, + false), + UMF_RESULT_SUCCESS); +} + +TEST_F(LevelZeroResidencyTestFixture, addDeviceTwiceShouldFail) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_2, + true), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_2, + true), + UMF_RESULT_ERROR_INVALID_ARGUMENT); +} + +TEST_F(LevelZeroResidencyTestFixture, removeDeviceTwiceShouldFail) { + provider = l0mock.initializeMemoryProviderWithResidentDevices( + OUR_DEVICE, {DEVICE_1, DEVICE_5, DEVICE_3}); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_3, + false), + UMF_RESULT_SUCCESS); + ASSERT_EQ(umfLevelZeroMemoryProviderResidentDeviceChange(provider, DEVICE_3, + false), + UMF_RESULT_ERROR_INVALID_ARGUMENT); +} + +int main(int argc, char **argv) { + try { + InitGoogleTest(&argc, argv); + AddGlobalTestEnvironment(new MockedLevelZeroTestEnvironment); + return RUN_ALL_TESTS(); + } catch (...) { + std::cerr << "Exception occurred." 
<< std::endl; + return 1; + } +} diff --git a/test/test_pool_null_params.cpp b/test/test_pool_null_params.cpp index b4eeae00d4..0952e029af 100644 --- a/test/test_pool_null_params.cpp +++ b/test/test_pool_null_params.cpp @@ -55,7 +55,22 @@ const PoolOpsFn poolOpsList[] = { &umfProxyPoolOps #endif &umfDisjointPoolOps}; + +static const char *poolOpsNames[] = { +#if defined(UMF_POOL_SCALABLE_ENABLED) + "umfScalablePoolOps", +#endif +#if defined(UMF_POOL_JEMALLOC_ENABLED) + "umfJemallocPoolOps", +#endif +#if defined(UMF_POOL_PROXY_ENABLED) + "umfProxyPoolOps", +#endif + "umfDisjointPoolOps"}; } // namespace INSTANTIATE_TEST_SUITE_P(poolNullParamsTest, PoolNullParamsTest, - ::testing::ValuesIn(poolOpsList)); + ::testing::ValuesIn(poolOpsList), + ([](auto const &info) -> std::string { + return poolOpsNames[info.index]; + })); diff --git a/test/test_valgrind.sh b/test/test_valgrind.sh index cff45bdec7..fa2610dd37 100755 --- a/test/test_valgrind.sh +++ b/test/test_valgrind.sh @@ -128,6 +128,9 @@ for test in $TESTS; do echo "- SKIPPED" continue; # skip testing helper binaries used by the ipc_file_prov_* tests ;; + ./test/test_ctl_env_app) + continue; # this is not a standalone test + ;; ./test/test_memspace_host_all) FILTER='--gtest_filter="-*allocsSpreadAcrossAllNumaNodes"' ;; @@ -140,9 +143,17 @@ for test in $TESTS; do ./test/test_memspace_highest_capacity) FILTER='--gtest_filter="-*highestCapacityVerify*"' ;; + ./test/test_pool_residency) + echo "- SKIPPED" + continue; # TODO: set UMF_ZE_LOADER_LIB_NAME as ctest does + ;; ./test/test_provider_os_memory_multiple_numa_nodes) FILTER='--gtest_filter="-testNuma.checkModeInterleave*:testNumaNodesAllocations/testNumaOnEachNode.checkNumaNodesAllocations*:testNumaNodesAllocations/testNumaOnEachNode.checkModePreferred*:testNumaNodesAllocations/testNumaOnEachNode.checkModeInterleaveSingleNode*:testNumaNodesAllocationsAllCpus/testNumaOnEachCpu.checkModePreferredEmptyNodeset*:testNumaNodesAllocationsAllCpus/testNumaOnEachCpu.checkModeLocal*"' ;; + ./test/test_provider_level_zero_residency) + echo "- SKIPPED" + continue; # TODO: set UMF_ZE_LOADER_LIB_NAME as ctest does + ;; ./test/test_memspace_highest_bandwidth) FILTER='--gtest_filter="-*allocLocalMt*"' ;; diff --git a/test/utils/cpp_helpers.hpp b/test/utils/cpp_helpers.hpp index ca1940e162..1f8ffb4e7a 100644 --- a/test/utils/cpp_helpers.hpp +++ b/test/utils/cpp_helpers.hpp @@ -10,11 +10,6 @@ #ifndef UMF_TEST_HELPERS_HPP #define UMF_TEST_HELPERS_HPP 1 -#include -#include -#include -#include - #include #include #include @@ -22,11 +17,17 @@ #include #include +#include +#include +#include +#include + namespace umf_test { using pool_unique_handle_t = std::unique_ptr>; + using provider_unique_handle_t = std::unique_ptr>; @@ -86,6 +87,8 @@ template umf_memory_pool_ops_t poolOpsBase() { UMF_ASSIGN_OP(ops, T, malloc_usable_size, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, free, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, get_last_allocation_error, UMF_RESULT_ERROR_UNKNOWN); + UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_INVALID_CTL_PATH); + UMF_ASSIGN_OP(ops, T, ext_trim_memory, UMF_RESULT_ERROR_UNKNOWN); return ops; } @@ -111,6 +114,11 @@ template constexpr umf_memory_provider_ops_t providerOpsBase() { UMF_ASSIGN_OP(ops, T, ext_put_ipc_handle, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, ext_open_ipc_handle, UMF_RESULT_ERROR_UNKNOWN); UMF_ASSIGN_OP(ops, T, ext_close_ipc_handle, UMF_RESULT_ERROR_UNKNOWN); + UMF_ASSIGN_OP(ops, T, ext_get_allocation_properties, + UMF_RESULT_ERROR_UNKNOWN); + 
UMF_ASSIGN_OP(ops, T, ext_get_allocation_properties_size, + UMF_RESULT_ERROR_UNKNOWN); + UMF_ASSIGN_OP(ops, T, ext_ctl, UMF_RESULT_ERROR_INVALID_CTL_PATH); return ops; } } // namespace detail diff --git a/test/utils/utils_log.cpp b/test/utils/utils_log.cpp index cce61db585..3a3dd19fa7 100644 --- a/test/utils/utils_log.cpp +++ b/test/utils/utils_log.cpp @@ -1,8 +1,11 @@ -// Copyright (C) 2024 Intel Corporation +// Copyright (C) 2024-2025 Intel Corporation // Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +#include + #include "base.hpp" +#include "ctl/ctl_internal.h" #include "test_helpers.h" #define MOCK_FILE_PTR (FILE *)0xBADBEEF @@ -26,6 +29,14 @@ FILE *expected_stream = stderr; int expect_fput_count = 0; int fput_count = 0; +// Some logging tests are disabled because, in developer mode, the log macros add +// the filename and line number to each message, so checks on the exact log content fail. +#ifdef UMF_DEVELOPER_MODE +#define DISABLE_IN_DEVELOPER_MODE(TESTNAME) DISABLED_##TESTNAME +#else +#define DISABLE_IN_DEVELOPER_MODE(TESTNAME) TESTNAME +#endif + int mock_fputs(const char *s, FILE *stream) { fput_count++; if (!expected_message.empty()) { @@ -93,7 +104,7 @@ int mock_strerror_windows(char *buff, size_t s, int errnum) { } extern "C" { - +#define DISABLE_CTL_LOGGER 1 const char *env_variable = ""; #define fopen(A, B) mock_fopen(A, B) #define fputs(A, B) mock_fputs(A, B) @@ -135,13 +146,14 @@ void helper_checkConfig(utils_log_config_t *expected, utils_log_config_t *is) { EXPECT_EQ(expected->level, is->level); EXPECT_EQ(expected->flushLevel, is->flushLevel); EXPECT_EQ(expected->output, is->output); - EXPECT_EQ(expected->timestamp, is->timestamp); - EXPECT_EQ(expected->pid, is->pid); + EXPECT_EQ(expected->enableTimestamp, is->enableTimestamp); + EXPECT_EQ(expected->enablePid, is->enablePid); } -TEST_F(test, parseEnv_errors) { +TEST_F(test, DISABLE_IN_DEVELOPER_MODE(parseEnv_errors)) { expected_message = ""; - loggerConfig = {0, 0, LOG_ERROR, LOG_ERROR, NULL}; + loggerConfig = + utils_log_config_t{false, false, LOG_ERROR, LOG_ERROR, NULL, ""}; expect_fput_count = 0; expected_stream = stderr; @@ -164,7 +176,7 @@ helper_log_init(test_env.c_str()); } -TEST_F(test, parseEnv) { +TEST_F(test, DISABLE_IN_DEVELOPER_MODE(parseEnv)) { utils_log_config_t b = loggerConfig; expected_message = ""; @@ -195,14 +207,16 @@ {"output:file," + std::string(256, 'x'), MOCK_FILE_PTR}, {"output:file," + std::string(257, 'x'), NULL}, }; - std::vector> timestamps = { - {"timestamp:yes", 1}, - {"timestamp:invalid", 0}, - {"timestamp:no", 0}, - {"", 0}}; - - std::vector> pids = { - {"pid:yes", 1}, {"pid:invalid", 0}, {"pid:no", 0}, {"", 0}}; + std::vector<std::pair<std::string, bool>> timestamps = { + {"timestamp:yes", true}, + {"timestamp:invalid", false}, + {"timestamp:no", false}, + {"", false}}; + + std::vector<std::pair<std::string, bool>> pids = {{"pid:yes", true}, + {"pid:invalid", false}, + {"pid:no", false}, + {"", false}}; for (const auto &logLevel : logLevels) { for (const auto &flushLevel : flushLevels) { for (const auto &output : outputs) { @@ -212,7 +226,8 @@ flushLevel.first + ";" + output.first + ";" + timestamp.first + ";" + pid.first; - b = loggerConfig = {0, 0, LOG_ERROR, LOG_ERROR, NULL}; + b = loggerConfig = utils_log_config_t{ + false, false, LOG_ERROR, LOG_ERROR, NULL, ""}; expect_fput_count = 0; expect_fopen_count = 0; expected_stream = stderr; @@ -229,8 +244,8 @@
expect_fopen_count = 1; } expected_stream = output.second; - b.timestamp = timestamp.second; - b.pid = pid.second; + b.enableTimestamp = timestamp.second; + b.enablePid = pid.second; b.flushLevel = (utils_log_level_t)flushLevel.second; b.level = (utils_log_level_t)logLevel.second; @@ -254,10 +269,12 @@ TEST_F(test, parseEnv) { } } -template void helper_test_log(Args... args) { +template +void helper_test_log(utils_log_level_t level, const char *func, + const char *format, Args... args) { fput_count = 0; fflush_count = 0; - utils_log(args...); + utils_log(level, NULL, func, format, args...); EXPECT_EQ(fput_count, expect_fput_count); EXPECT_EQ(fflush_count, expect_fflush_count); } @@ -284,7 +301,8 @@ TEST_F(test, log_levels) { expected_stream = stderr; for (int i = LOG_DEBUG; i <= LOG_ERROR; i++) { for (int j = LOG_DEBUG; j <= LOG_ERROR; j++) { - loggerConfig = {0, 0, (utils_log_level_t)i, LOG_DEBUG, stderr}; + loggerConfig = utils_log_config_t{ + false, false, (utils_log_level_t)i, LOG_DEBUG, stderr, ""}; if (i > j) { expect_fput_count = 0; expect_fflush_count = 0; @@ -307,7 +325,8 @@ TEST_F(test, log_outputs) { expect_fflush_count = 1; expected_message = "[DEBUG UMF] " + MOCK_FN_NAME + ": example log\n"; for (auto o : outs) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, o}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, o, ""}; expected_stream = o; helper_test_log(LOG_DEBUG, MOCK_FN_NAME.c_str(), "%s", "example log"); } @@ -318,7 +337,8 @@ TEST_F(test, flush_levels) { expect_fput_count = 1; for (int i = LOG_DEBUG; i <= LOG_ERROR; i++) { for (int j = LOG_DEBUG; j <= LOG_ERROR; j++) { - loggerConfig = {0, 0, LOG_DEBUG, (utils_log_level_t)i, stderr}; + loggerConfig = utils_log_config_t{ + false, false, LOG_DEBUG, (utils_log_level_t)i, stderr, ""}; if (i > j) { expect_fflush_count = 0; } else { @@ -335,7 +355,8 @@ TEST_F(test, flush_levels) { TEST_F(test, long_log) { expect_fput_count = 1; expect_fflush_count = 1; - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_message = "[DEBUG UMF] " + MOCK_FN_NAME + ": " + std::string(8189 - MOCK_FN_NAME.size(), 'x') + "\n"; helper_test_log(LOG_DEBUG, MOCK_FN_NAME.c_str(), "%s", @@ -350,7 +371,8 @@ TEST_F(test, long_log) { TEST_F(test, timestamp_log) { expect_fput_count = 1; expect_fflush_count = 1; - loggerConfig = {1, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{true, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; // TODO: for now we do not check output message, // as it requires more sophisticated message validation (a.k.a regrex) expected_message = ""; @@ -360,7 +382,8 @@ TEST_F(test, timestamp_log) { TEST_F(test, pid_log) { expect_fput_count = 1; expect_fflush_count = 1; - loggerConfig = {0, 1, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, true, LOG_DEBUG, LOG_DEBUG, stderr, ""}; // TODO: for now we do not check output message, // as it requires more sophisticated message validation (a.k.a regrex) expected_message = ""; @@ -368,7 +391,8 @@ TEST_F(test, pid_log) { } TEST_F(test, log_fatal) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, NULL}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, NULL, ""}; expected_stream = stderr; expect_fput_count = 1; expect_fflush_count = 1; @@ -378,11 +402,12 @@ TEST_F(test, log_fatal) { helper_test_log(LOG_FATAL, MOCK_FN_NAME.c_str(), "%s", "example log"); } -TEST_F(test, log_macros) { +TEST_F(test, 
DISABLE_IN_DEVELOPER_MODE(log_macros)) { expected_stream = stderr; expect_fput_count = 1; expect_fflush_count = 1; - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_message = "[DEBUG UMF] TestBody: example log\n"; fput_count = 0; @@ -420,16 +445,19 @@ TEST_F(test, log_macros) { EXPECT_EQ(fflush_count, expect_fflush_count); } -template void helper_test_plog(Args... args) { +template +void helper_test_plog(utils_log_level_t level, const char *func, + const char *format, Args... args) { fput_count = 0; fflush_count = 0; - utils_plog(args...); + utils_plog(level, NULL, func, format, args...); EXPECT_EQ(fput_count, expect_fput_count); EXPECT_EQ(fflush_count, expect_fflush_count); } TEST_F(test, plog_basic) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_stream = stderr; errno = 1; strerr = "test error"; @@ -445,7 +473,8 @@ TEST_F(test, plog_basic) { } TEST_F(test, plog_invalid) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_stream = stderr; errno = INVALID_ERRNO; strerr = "test error"; @@ -461,7 +490,8 @@ TEST_F(test, plog_invalid) { } TEST_F(test, plog_long_message) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_stream = stderr; expect_fput_count = 1; expect_fflush_count = 1; @@ -482,7 +512,8 @@ TEST_F(test, plog_long_message) { } TEST_F(test, plog_long_error) { - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; expected_stream = stderr; expect_fput_count = 1; expect_fflush_count = 1; @@ -504,11 +535,12 @@ TEST_F(test, plog_long_error) { strerr = NULL; // do not use tmp.c_str() beyond its scope } -TEST_F(test, log_pmacros) { +TEST_F(test, DISABLE_IN_DEVELOPER_MODE(log_pmacros)) { expected_stream = stderr; expect_fput_count = 1; expect_fflush_count = 1; - loggerConfig = {0, 0, LOG_DEBUG, LOG_DEBUG, stderr}; + loggerConfig = + utils_log_config_t{false, false, LOG_DEBUG, LOG_DEBUG, stderr, ""}; errno = 1; strerr = "test error"; diff --git a/third_party/requirements.txt b/third_party/requirements.txt index 27f041b0d9..50e1ead7a7 100644 --- a/third_party/requirements.txt +++ b/third_party/requirements.txt @@ -4,7 +4,7 @@ black==24.3.0 clang-format==15.0.7 cmake-format==0.6.13 # Linting the source code -bandit==1.8.6 +bandit==1.9.2 # Tests packaging==25.0 # Generating HTML documentation @@ -19,5 +19,5 @@ sphinx==8.1.3 sphinx_book_theme==1.1.3 # Spelling check in documentation codespell==2.4.1 -pyenchant==3.2.2 -sphinxcontrib-spelling==8.0.1 +pyenchant==3.3.0 +sphinxcontrib-spelling==8.0.2
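The name-generator lambdas added to the INSTANTIATE_TEST_SUITE_P calls in this patch follow the standard GoogleTest pattern: the optional fourth argument receives a ::testing::TestParamInfo and returns a human-readable suffix for each generated test, which may contain only letters, digits, and underscores. A minimal, self-contained sketch of the pattern is below; the fixture, parameter values, and names here are placeholders for illustration, not taken from the patch:

```cpp
#include <gtest/gtest.h>
#include <string>

// Placeholder parameterized fixture, used only to illustrate the pattern.
class MemoryTypeTest : public ::testing::TestWithParam<int> {};

TEST_P(MemoryTypeTest, placeholder) { SUCCEED(); }

// The fourth argument is a name generator: it receives
// ::testing::TestParamInfo<ParamType> and returns the name suffix
// for each instantiated test (letters, digits, underscores only).
INSTANTIATE_TEST_SUITE_P(
    MemoryTypeSuite, MemoryTypeTest, ::testing::Values(0, 1, 2),
    ([](auto const &info) -> std::string {
        static const char *names[] = {"TYPE_HOST", "TYPE_DEVICE",
                                      "TYPE_SHARED"};
        return names[info.index]; // info.index is the value's position in Values(...)
    }));
```

With a generator like this, ctest and --gtest_filter report names such as MemoryTypeSuite/MemoryTypeTest.placeholder/TYPE_HOST instead of a bare parameter index, which is the readability gain these changes aim for.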