From ff33b161f0920f67fd4111f3c867d64513a4a641 Mon Sep 17 00:00:00 2001 From: Adam Pocock Date: Fri, 14 Nov 2025 16:08:46 -0500 Subject: [PATCH] Upgrading to TF 2.20. Dropping Windows support. --- .github/workflows/build.yml | 64 +--- README.md | 2 +- .../scripts/test_download.sh | 6 +- .../tensorflow-core-native/.bazelversion | 2 +- .../tensorflow-core-native/WORKSPACE | 82 ++++- .../tensorflow-core-native/pom.xml | 10 - .../scripts/dist_download.sh | 16 +- .../tensorflow-core-native/tensorflow.bazelrc | 333 ++++++++++-------- .../tensorflow-core-platform/pom.xml | 8 +- 9 files changed, 284 insertions(+), 239 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 786b86984a3..8fa9c52be67 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,12 +20,12 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'adopt' java-version: '17' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v5 - name: Build project run: | gcc --version @@ -74,13 +74,13 @@ jobs: sudo apt update sudo apt install -y curl wget unzip tar git gcc g++ - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'zulu' java-version: '17' architecture: 'aarch64' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v5 - name: Build project run: | gcc --version @@ -98,12 +98,12 @@ jobs: ext: ["", -gpu] steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'adopt' java-version: '11' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v5 - name: Build project run: | gcc --version @@ -121,13 +121,13 @@ jobs: ext: [""] steps: - name: Configure Java - uses: actions/setup-java@v2 + uses: actions/setup-java@v5 with: distribution: 'zulu' java-version: '17' architecture: 'arm64' - name: Checkout repository - uses: actions/checkout@v1 + uses: actions/checkout@v5 - name: Build project run: | clang --version @@ -137,55 +137,9 @@ jobs: - name: Deploy native artifact if: env.DEPLOY_RELEASE == 'true' || env.DEPLOY_SNAPSHOT == 'true' run: mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl }} - windows-x86_64: - runs-on: windows-2022 - needs: prepare - strategy: - matrix: - ext: [""] #, -gpu] - steps: - - name: Install environment - shell: cmd - run: | - set "PATH=C:\msys64\usr\bin;%PATH%" - python -m pip install numpy six - set "EXT=${{ matrix.ext }}" - echo %JAVA_HOME% - - name: Configure Java - uses: actions/setup-java@v2 - with: - distribution: 'adopt' - java-version: '11' - - name: Checkout repository - uses: actions/checkout@v1 - - name: Build project - shell: cmd - run: | - call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64 - set "PATH=C:\msys64\usr\bin;%PATH%" - echo Shorten work paths to prevent Bazel from reaching MAX_PATH limit - mkdir C:\tmp - set "TEST_TMPDIR=C:\tmp" - set "TMPDIR=C:\tmp" - set "TEMP=C:\tmp" - set "TMP=C:\tmp" - bash --version - git --version - cl - call mvn -version - echo ^^^^ossrh^^${{ secrets.CI_DEPLOY_USERNAME }}^^${{ secrets.CI_DEPLOY_PASSWORD }}^^^^ > %USERPROFILE%\.m2\settings.xml - set "SKIP_EXPORT=true" - call mvn clean install -pl "!tensorflow-framework" -B 
-U -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} - if ERRORLEVEL 1 exit /b - - name: Deploy native artifact - if: env.DEPLOY_RELEASE == 'true' || env.DEPLOY_SNAPSHOT == 'true' - shell: cmd - run: | - call mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e -Djavacpp.platform=${{ github.job }} -Djavacpp.platform.extension=${{ matrix.ext }} -Durl=${{ needs.prepare.outputs.repositoryUrl }} - if ERRORLEVEL 1 exit /b deploy: if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/staging') }} # DEPLOY_SNAPSHOT (releases should be signed and deployed manually from local machine) - needs: [linux-x86_64, windows-x86_64, macosx-arm64, linux-arm64] + needs: [linux-x86_64, macosx-arm64, linux-arm64] runs-on: ubuntu-22.04 steps: - name: Configure Java diff --git a/README.md b/README.md index e1d1e080bcb..8074164c790 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ only binaries for the following are being **supported and distributed** by this - `linux-x86_64-gpu`: Linux platforms on Intel/AMD chips with Cuda GPU support - `linux-arm64`: Linux platforms on Arm chips - `macosx-arm64`: MacOS X platforms on Apple Silicon chips -- `windows-x86_64`: Windows platforms on Intel/AMD chips +- `windows-x86_64`: Windows platforms on Intel/AMD chips (v1.1.0 and earlier) Binaries for `macosx-x86_64` are available for TF-Java 1.0 series releases and earlier, they were dropped from TF-Java 1.1 and newer as they are no longer supported or released by Google. diff --git a/tensorflow-core/tensorflow-core-api/scripts/test_download.sh b/tensorflow-core/tensorflow-core-api/scripts/test_download.sh index 5d1c2988d7e..86955b286e8 100755 --- a/tensorflow-core/tensorflow-core-api/scripts/test_download.sh +++ b/tensorflow-core/tensorflow-core-api/scripts/test_download.sh @@ -5,13 +5,13 @@ DOWNLOAD_FOLDER="$1" case ${PLATFORM:-} in 'linux-x86_64') - TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/f3/73/3a906feb0d71d9353c6fb2363d4052856cc6eff5a78a097b1a6002d4e908/tensorflow_text-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' + TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/5a/e2/3efb758e284f2701429e1afc90293494fa3be7eac93fdc96de6378b21831/tensorflow_text-2.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' ;; 'linux-arm64') - TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/8a/9a/ebba9f6274f8b51e5fe1ac2411b8b6bf680a32d10bd6e9c54be1faeec062/tensorflow_text-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl' + TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/21/23/70683698d751e08cf1ab70d6f31a39034a2a9a494c1ec42d301cba3d8287/tensorflow_text-2.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl' ;; 'macosx-arm64') - TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/18/b6/8ad233edb0732847db1da538cea941dcccc42f59304ff6fb449676e6dd5a/tensorflow_text-2.18.1-cp311-cp311-macosx_11_0_arm64.whl' + TEXT_WHEEL_URL='https://files.pythonhosted.org/packages/aa/d7/417877fddb215d8a99ae636946f96b19f6e2ceedcd7eb49be985baae3662/tensorflow_text-2.19.0-cp312-cp312-macosx_11_0_arm64.whl' ;; *) echo "TensorFlow Text distribution for ${PLATFORM} is not supported for download" diff --git a/tensorflow-core/tensorflow-core-native/.bazelversion b/tensorflow-core/tensorflow-core-native/.bazelversion index f3c238740e5..26c75fe8ad4 100644 --- a/tensorflow-core/tensorflow-core-native/.bazelversion +++ 
b/tensorflow-core/tensorflow-core-native/.bazelversion @@ -1,2 +1,2 @@ -6.5.0 +7.7.0 # NOTE: Update Bazel version in tensorflow/tools/ci_build/release/common.sh.oss \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-native/WORKSPACE b/tensorflow-core/tensorflow-core-native/WORKSPACE index ad2c74508ad..db96299e3ab 100644 --- a/tensorflow-core/tensorflow-core-native/WORKSPACE +++ b/tensorflow-core/tensorflow-core-native/WORKSPACE @@ -18,16 +18,25 @@ http_archive( "find tensorflow third_party/xla/third_party/tsl third_party/xla/xla/tsl -name \\*.proto | xargs sed -i.bak 's/^package tensorflow\\([^;]*\\).*$/package tensorflow\\1;\\noption java_package = \"org.tensorflow.proto\\1\";/'", ], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.18.0.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.20.0.tar.gz", ], - sha256 = "d7876f4bb0235cac60eb6316392a7c48676729860da1ab659fb440379ad5186d", - strip_prefix = "tensorflow-2.18.0" + sha256 = "a640d1f97be316a09301dfc9347e3d929ad4d9a2336e3ca23c32c93b0ff7e5d0", + strip_prefix = "tensorflow-2.20.0" ) ##### Copy content of tensorflow/WORKSPACE here (make sure to change references of default package "//" to "@org_tensorflow//") # buildifier: disable=load-on-top +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "rules_shell", + sha256 = "bc61ef94facc78e20a645726f64756e5e285a045037c7a61f65af2941f4c25e1", + strip_prefix = "rules_shell-0.4.1", + url = "https://github.com/bazelbuild/rules_shell/releases/download/v0.4.1/rules_shell-v0.4.1.tar.gz", +) + # We must initialize hermetic python first. load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") @@ -47,6 +56,12 @@ load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") tf_workspace3() +load("@rules_shell//shell:repositories.bzl", "rules_shell_dependencies", "rules_shell_toolchains") + +rules_shell_dependencies() + +rules_shell_toolchains() + # Initialize hermetic Python load("@local_xla//third_party/py:python_init_rules.bzl", "python_init_rules") @@ -67,6 +82,7 @@ python_init_repositories( "3.10": "@org_tensorflow//:requirements_lock_3_10.txt", "3.11": "@org_tensorflow//:requirements_lock_3_11.txt", "3.12": "@org_tensorflow//:requirements_lock_3_12.txt", + "3.13": "@org_tensorflow//:requirements_lock_3_13.txt", }, ) @@ -96,7 +112,35 @@ load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0") tf_workspace0() load( - "@local_tsl//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl", + "@local_xla//third_party/py:python_wheel.bzl", + "nvidia_wheel_versions_repository", + "python_wheel_version_suffix_repository", +) + +nvidia_wheel_versions_repository( + name = "nvidia_wheel_versions", + versions_source = "//ci/official/requirements_updater:nvidia-requirements.txt", +) + +python_wheel_version_suffix_repository(name = "tf_wheel_version_suffix") + +load( + "@rules_ml_toolchain//cc/deps:cc_toolchain_deps.bzl", + "cc_toolchain_deps", +) + +cc_toolchain_deps() + +register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64") + +register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64_cuda") + +register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64") + +register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64_cuda") + +load( + "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl", "cuda_json_init_repository", ) @@ -108,7 +152,7 @@ load( "CUDNN_REDISTRIBUTIONS", ) load( - 
"@local_tsl//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl", + "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl", "cuda_redist_init_repositories", "cudnn_redist_init_repository", ) @@ -122,22 +166,42 @@ cudnn_redist_init_repository( ) load( - "@local_tsl//third_party/gpus/cuda/hermetic:cuda_configure.bzl", + "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_configure.bzl", "cuda_configure", ) cuda_configure(name = "local_config_cuda") load( - "@local_tsl//third_party/nccl/hermetic:nccl_redist_init_repository.bzl", + "@rules_ml_toolchain//third_party/nccl/hermetic:nccl_redist_init_repository.bzl", "nccl_redist_init_repository", ) nccl_redist_init_repository() load( - "@local_tsl//third_party/nccl/hermetic:nccl_configure.bzl", + "@rules_ml_toolchain//third_party/nccl/hermetic:nccl_configure.bzl", "nccl_configure", ) -nccl_configure(name = "local_config_nccl") \ No newline at end of file +nccl_configure(name = "local_config_nccl") + +load( + "@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_json_init_repository.bzl", + "nvshmem_json_init_repository", +) + +nvshmem_json_init_repository() + +load( + "@nvshmem_redist_json//:distributions.bzl", + "NVSHMEM_REDISTRIBUTIONS", +) +load( + "@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_redist_init_repository.bzl", + "nvshmem_redist_init_repository", +) + +nvshmem_redist_init_repository( + nvshmem_redistributions = NVSHMEM_REDISTRIBUTIONS, +) diff --git a/tensorflow-core/tensorflow-core-native/pom.xml b/tensorflow-core/tensorflow-core-native/pom.xml index bb9eb053c33..ce6041d0427 100644 --- a/tensorflow-core/tensorflow-core-native/pom.xml +++ b/tensorflow-core/tensorflow-core-native/pom.xml @@ -119,12 +119,6 @@ ${project.version} ${javacpp.platform.macosx-arm64} - - ${project.groupId} - ${project.artifactId} - ${project.version} - ${javacpp.platform.windows-x86_64} - ${project.groupId} ${project.artifactId} @@ -161,10 +155,6 @@ ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.macosx-arm64}.jar ${javacpp.platform.macosx-arm64} - - ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.windows-x86_64}.jar - ${javacpp.platform.windows-x86_64} - ${project.build.directory}/${project.artifactId}-${project.version}-${javacpp.platform.linux-arm64}.jar ${javacpp.platform.linux-arm64} diff --git a/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh b/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh index acf28b9391d..9e016862ae7 100755 --- a/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh +++ b/tensorflow-core/tensorflow-core-native/scripts/dist_download.sh @@ -5,20 +5,16 @@ DOWNLOAD_FOLDER="$1" case ${PLATFORM:-} in 'linux-x86_64') - WHEEL_URL='https://files.pythonhosted.org/packages/aa/1d/032a9d40762895e51cad06f382135c14d16487a0ad9dcc65aae5bd89c968/tensorflow_cpu-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' + WHEEL_URL='https://files.pythonhosted.org/packages/1a/9e/594164db23e3e262da1a0e8983258811eff56e5af6b7b6da5eccccb8d4c7/tensorflow_cpu-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' ;; 'linux-x86_64-gpu') - WHEEL_URL='https://files.pythonhosted.org/packages/84/76/c55967ac9968ddaede25a4dce37aba37e9030656f02c12676151ce1b6f22/tensorflow-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' + 
WHEEL_URL='https://files.pythonhosted.org/packages/43/fb/8be8547c128613d82a2b006004026d86ed0bd672e913029a98153af4ffab/tensorflow-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' ;; 'linux-arm64') - WHEEL_URL='https://files.pythonhosted.org/packages/56/e4/55aaac2b15af4dad079e5af329a79d961e5206589d0e02b1e8da221472ed/tensorflow-2.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl' + WHEEL_URL='https://files.pythonhosted.org/packages/ea/4c/c1aa90c5cc92e9f7f9c78421e121ef25bae7d378f8d1d4cbad46c6308836/tensorflow-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl' ;; 'macosx-arm64') - WHEEL_URL='https://files.pythonhosted.org/packages/26/08/556c4159675c1a30e077ec2a942eeeb81b457cc35c247a5b4a59a1274f05/tensorflow-2.18.0-cp311-cp311-macosx_12_0_arm64.whl' - ;; - 'windows-x86_64') - WHEEL_URL='https://files.pythonhosted.org/packages/76/ad/fa6c508a15ff79cb5409294c293388e0999b7d480f84b65e4287277434fe/tensorflow_intel-2.18.0-cp311-cp311-win_amd64.whl' - CLIB_URL='https://storage.googleapis.com/tensorflow/versions/2.18.0/libtensorflow-cpu-windows-x86_64.zip' + WHEEL_URL='https://files.pythonhosted.org/packages/04/82/af283f402f8d1e9315644a331a5f0f326264c5d1de08262f3de5a5ade422/tensorflow-2.20.0-cp313-cp313-macosx_12_0_arm64.whl' ;; *) echo "TensorFlow distribution for ${PLATFORM} is not supported for download" @@ -58,9 +54,5 @@ if [[ "$PLATFORM" =~ "linux" ]]; then elif [[ "$PLATFORM" =~ "macosx" ]]; then ln -fs libtensorflow_cc.2.dylib libtensorflow_cc.dylib ln -fs libtensorflow_framework.2.dylib libtensorflow_framework.dylib -elif [[ "$PLATFORM" =~ "windows" ]]; then - # FIXME Looks like tsl headers are only present under the tensorflow folder for the windows build - # (while it is also available at the root of the include folder for other platforms) - cd include && ln -fs tensorflow/tsl tsl && cd - fi ls -l . diff --git a/tensorflow-core/tensorflow-core-native/tensorflow.bazelrc b/tensorflow-core/tensorflow-core-native/tensorflow.bazelrc index 0c7cd22dc11..5956441845f 100644 --- a/tensorflow-core/tensorflow-core-native/tensorflow.bazelrc +++ b/tensorflow-core/tensorflow-core-native/tensorflow.bazelrc @@ -46,10 +46,10 @@ # # # Remote build execution options (only configured to work with TF team projects for now.) -# rbe_base: General RBE options shared by all flavors. -# rbe_linux: General RBE options used on all linux builds. -# rbe_win_base: General RBE options used on all Windows builds. Not to be used standalone. -# rbe_win_clang: Options specific to compiling using Clang. +# rbe_base: General RBE options shared by all flavors. +# rbe_linux: General RBE options used on all linux builds. +# rbe_win_base: General RBE options used on all Windows builds. Not to be used standalone. +# rbe_windows_x86_cpu_2022: Windows-specific RBE options. # # rbe_linux_cpu: RBE options to build with only CPU support. # rbe_linux_cuda: RBE options to build with GPU support using clang. @@ -66,23 +66,47 @@ # release_gpu_linux: Toolchain and CUDA options for Linux GPU builds. # release_cpu_macos: Toolchain and CUDA options for MacOS CPU builds. # release_cpu_windows: Toolchain and CUDA options for Windows CPU builds. - +# LINT.IfChange # Default build options. These are applied first and unconditionally. +# These are used to generate the ML wheel version string. +# See the explanation in the file comment of +# @local_xla//third_party/py/python_wheel.bzl. 
+# The generated version suffix is used in +# third_party/tensorflow/core/public/release_version.h and +# third_party/tensorflow/tools/pip_package/setup.oss.py.tpl +build --repo_env=ML_WHEEL_TYPE="snapshot" +build --repo_env=ML_WHEEL_BUILD_DATE="" +build --repo_env=ML_WHEEL_VERSION_SUFFIX="" + # For projects which use TensorFlow as part of a Bazel build process, putting # nothing in a bazelrc will default to a monolithic build. The following line # opts in to modular op registration support by default. build --define framework_shared_object=true build --define tsl_protobuf_header_only=true -build --define=use_fast_cpp_protos=true build --define=allow_oversize_protos=true build --spawn_strategy=standalone build -c opt +build --repo_env=USE_PYWRAP_RULES=True +build --copt=-DGRPC_BAZEL_BUILD +build --host_copt=-DGRPC_BAZEL_BUILD +build --action_env=GRPC_BAZEL_RUNTIME=1 +build --repo_env=PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=upb +build --action_env=PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=upb +build --repo_env=RULES_PYTHON_ENABLE_PYSTAR=0 + +# Do not do this. If enabled protobuf's core internal target +# @com_google_protobuf//python:protobuf_python will start depending on a bunch +# of cc_binary shared libraries artifacts, which will mess with how we link +# protobuf dependencies ourselves. By default this value is false, but some +# projects enable it, which we don't want here. +# build --define=use_fast_cpp_protos=true + # Make Bazel print out all options from rc files. -build --announce_rc +common --announce_rc # TODO(mihaimaruseac): Document this option or remove if no longer needed build --define=grpc_no_ares=true @@ -101,6 +125,7 @@ build --define=grpc_no_ares=true # all library archives in -whole_archive -no_whole_archive. build --noincompatible_remove_legacy_whole_archive build --features=-force_no_whole_archive +build --host_features=-force_no_whole_archive # TODO(mihaimaruseac): Document this option or remove if no longer needed build --enable_platform_specific_config @@ -114,18 +139,33 @@ build --config=short_logs # TODO(mihaimaruseac): Document this option or remove if no longer needed build --config=v2 +# Precompiling results in some action conflicts. Disable it for now until +# the problematic targets are fixed. +build --@rules_python//python/config_settings:precompile=force_disabled + # TF now has `cc_shared_library` targets, so it needs the experimental flag # TODO(rostam): Remove when `cc_shared_library` is enabled by default -build --experimental_cc_shared_library +common --experimental_cc_shared_library # cc_shared_library ensures no library is linked statically more than once. -build --experimental_link_static_libraries_once=false +common --experimental_link_static_libraries_once=false # Prevent regressions on those two incompatible changes # TODO: remove those flags when they are flipped in the default Bazel version TF uses. 
-build --incompatible_enforce_config_setting_visibility +common --incompatible_enforce_config_setting_visibility # TODO: also enable this flag after fixing the visibility violations -# build --incompatible_config_setting_private_default_visibility +# common --incompatible_config_setting_private_default_visibility + +# TODO: Enable Bzlmod +common --noenable_bzlmod + +# TODO: Migrate for https://github.com/bazelbuild/bazel/issues/7260 +common --noincompatible_enable_cc_toolchain_resolution +common --noincompatible_enable_android_toolchain_resolution + +# Print a stacktrace when a test is killed +test --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" + # Default options should come above this line. @@ -137,15 +177,19 @@ build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain build:android_arm --config=android build:android_arm --cpu=armeabi-v7a build:android_arm --fat_apk_cpu=armeabi-v7a +build:android_arm --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:armeabi-v7a build:android_arm64 --config=android build:android_arm64 --cpu=arm64-v8a build:android_arm64 --fat_apk_cpu=arm64-v8a +build:android_arm64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:arm64-v8a build:android_x86 --config=android build:android_x86 --cpu=x86 build:android_x86 --fat_apk_cpu=x86 +build:android_x86 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:x86 build:android_x86_64 --config=android build:android_x86_64 --cpu=x86_64 build:android_x86_64 --fat_apk_cpu=x86_64 +build:android_x86_64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:x86_64 # Build everything statically for Android since all static libs are later # bundled together into a single .so for deployment. @@ -162,24 +206,44 @@ build:macos --copt=-DGRPC_BAZEL_BUILD # Avoid hitting command line argument limit build:macos --features=archive_param_file +# TODO: Fix deps to remove this flag, see https://github.com/bazelbuild/bazel/pull/16414 +build:macos --linkopt=-Wl,-undefined,dynamic_lookup +build:macos --host_linkopt=-Wl,-undefined,dynamic_lookup + +# Use the Apple toolchain for MacOS builds. +build:macos --config=apple-toolchain + +# Use cc toolchains from apple_support for Apple builds (ios, macos, etc). +# https://github.com/bazelbuild/apple_support/tree/master?tab=readme-ov-file#bazel-6-setup +build:apple-toolchain --apple_crosstool_top=@local_config_apple_cc//:toolchain +build:apple-toolchain --crosstool_top=@local_config_apple_cc//:toolchain +build:apple-toolchain --host_crosstool_top=@local_config_apple_cc//:toolchain + # Settings for MacOS on ARM CPUs. build:macos_arm64 --cpu=darwin_arm64 build:macos_arm64 --macos_minimum_os=11.0 +build:macos_arm64 --platforms=@build_bazel_apple_support//configs/platforms:darwin_arm64 # iOS configs for each architecture and the fat binary builds. 
build:ios --apple_platform_type=ios -build:ios --apple_bitcode=embedded --copt=-fembed-bitcode +build:ios --copt=-fembed-bitcode build:ios --copt=-Wno-c++11-narrowing +build:ios --config=apple-toolchain build:ios_armv7 --config=ios build:ios_armv7 --cpu=ios_armv7 +build:ios_armv7 --platforms=@org_tensorflow//tensorflow/tools/toolchains/ios:ios_armv7 build:ios_arm64 --config=ios build:ios_arm64 --cpu=ios_arm64 +build:ios_arm64 --platforms=@build_bazel_apple_support//configs/platforms:ios_arm64 build:ios_arm64e --config=ios build:ios_arm64e --cpu=ios_arm64e +build:ios_arm64e --platforms=@build_bazel_apple_support//configs/platforms:ios_arm64e build:ios_sim_arm64 --config=ios build:ios_sim_arm64 --cpu=ios_sim_arm64 +build:ios_sim_arm64 --platforms=@build_bazel_apple_support//configs/platforms:ios_sim_arm64 build:ios_x86_64 --config=ios build:ios_x86_64 --cpu=ios_x86_64 +build:ios_x86_64 --platforms=@build_bazel_apple_support//configs/platforms:ios_x86_64 build:ios_fat --config=ios build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64 @@ -216,18 +280,22 @@ build:mkl_aarch64 -c opt build:mkl_aarch64_threadpool --define=build_with_mkl_aarch64=true build:mkl_aarch64_threadpool -c opt +# Default CUDA, CUDNN and NVSHMEM versions. +build:cuda_version --repo_env=HERMETIC_CUDA_VERSION="12.5.1" +build:cuda_version --repo_env=HERMETIC_CUDNN_VERSION="9.3.0" +build:cuda_version --repo_env=HERMETIC_NVSHMEM_VERSION="3.2.5" + # CUDA: This config refers to building CUDA op kernels with nvcc. build:cuda --repo_env TF_NEED_CUDA=1 build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain build:cuda --@local_config_cuda//:enable_cuda -# Default CUDA and CUDNN versions. -build:cuda --repo_env=HERMETIC_CUDA_VERSION="12.5.1" -build:cuda --repo_env=HERMETIC_CUDNN_VERSION="9.3.0" +build:cuda --config=cuda_version # This flag is needed to include CUDA libraries. build:cuda --@local_config_cuda//cuda:include_cuda_libs=true # This configuration is used for building the wheels. build:cuda_wheel --@local_config_cuda//cuda:include_cuda_libs=false +build:cuda_wheel --@local_config_nvshmem//:include_nvshmem_libs=false # CUDA: This config refers to building CUDA op kernels with clang. build:cuda_clang --config=cuda @@ -242,6 +310,8 @@ build:cuda_clang --copt=-Qunused-arguments # major release. Example: sm_80 kernels can run on sm_89 GPUs but # not on sm_90 GPUs. compute_80 kernels though can also run on sm_90 GPUs. build:cuda_clang --repo_env=HERMETIC_CUDA_COMPUTE_CAPABILITIES="sm_60,sm_70,sm_80,sm_89,compute_90" +# Permit newer CUDA versions than Clang is aware of +build:cuda_clang --copt="-Wno-unknown-cuda-version" # Set lld as the linker. build:cuda_clang --host_linkopt="-fuse-ld=lld" build:cuda_clang --host_linkopt="-lm" @@ -250,16 +320,16 @@ build:cuda_clang --linkopt="-lm" # Set up compilation CUDA version and paths and use the CUDA Clang toolchain. 
build:cuda_clang_official --config=cuda_clang -build:cuda_clang_official --repo_env=HERMETIC_CUDA_VERSION="12.5.1" -build:cuda_clang_official --repo_env=HERMETIC_CUDNN_VERSION="9.3.0" +build:cuda_clang_official --config=cuda_version build:cuda_clang_official --action_env=CLANG_CUDA_COMPILER_PATH="/usr/lib/llvm-18/bin/clang" build:cuda_clang_official --crosstool_top="@local_config_cuda//crosstool:toolchain" # Build with nvcc for CUDA and clang for host -build:nvcc_clang --config=cuda -build:nvcc_clang --action_env=TF_NVCC_CLANG="1" -build:nvcc_clang --@local_config_cuda//:cuda_compiler=nvcc - +build:cuda_nvcc --config=cuda +build:cuda_nvcc --action_env=TF_NVCC_CLANG="1" +build:cuda_nvcc --@local_config_cuda//:cuda_compiler=nvcc +# Old config for backward compatibility +build:nvcc_clang --config=cuda_nvcc # Debug config build:dbg -c dbg @@ -284,11 +354,26 @@ build:tpu --define=framework_shared_object=true build:tpu --copt=-DLIBTPU_ON_GCE build:tpu --define=enable_mlir_bridge=true +build:rocm --copt=-Wno-gnu-offsetof-extensions build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain build:rocm --define=using_rocm_hipcc=true build:rocm --define=tensorflow_mkldnn_contraction_kernel=0 +build:rocm --define=xnn_enable_avxvnniint8=false +build:rocm --define=xnn_enable_avx512fp16=false build:rocm --repo_env TF_NEED_ROCM=1 +build:rocm_clang_official --config=rocm +build:rocm_clang_official --action_env=CLANG_COMPILER_PATH="/usr/lib/llvm-18/bin/clang" +build:rocm_clang_official --action_env=TF_ROCM_CLANG="1" +build:rocm_clang_official --linkopt="-fuse-ld=lld" +build:rocm_clang_official --host_linkopt="-fuse-ld=lld" + +build:rocm_ci --config=rocm_clang_official +build:rocm_ci_hermetic --config=rocm_clang_official +build:rocm_ci_hermetic --repo_env="OS=ubuntu_22.04" +build:rocm_ci_hermetic --repo_env="ROCM_VERSION=6.2.0" +build:rocm_ci_hermetic --@local_config_rocm//rocm:use_rocm_hermetic_rpath=True + build:sycl --crosstool_top=@local_config_sycl//crosstool:toolchain build:sycl --define=using_sycl=true build:sycl --define=tensorflow_mkldnn_contraction_kernel=0 @@ -329,8 +414,6 @@ build:linux --copt="-Werror=unused-result" # Add switch as an error on Linux. build:linux --copt="-Wswitch" build:linux --copt="-Werror=switch" -# Required for building with clang -build:linux --copt="-Wno-error=unused-but-set-variable" # Linux ARM64 specific options build:linux_arm64 --copt="-mtune=generic" --copt="-march=armv8-a" --copt="-O3" @@ -369,15 +452,13 @@ build:windows --host_copt=-D_ENABLE_EXTENDED_ALIGNED_STORAGE # runfiles symlink tree to decide what to put into the Python wheel. startup --windows_enable_symlinks build:windows --enable_runfiles +build:windows --nobuild_python_zip +build:windows --dynamic_mode=off # Default paths for TF_SYSTEM_LIBS build:linux --define=PREFIX=/usr -build:linux --define=LIBDIR=$(PREFIX)/lib -build:linux --define=INCLUDEDIR=$(PREFIX)/include build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include build:macos --define=PREFIX=/usr -build:macos --define=LIBDIR=$(PREFIX)/lib -build:macos --define=INCLUDEDIR=$(PREFIX)/include build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include # TF_SYSTEM_LIBS do not work on windows. 
@@ -440,28 +521,31 @@ build:avx_linux --copt=-mavx build:avx_linux --host_copt=-mavx build:avx_win --copt=/arch:AVX -# Use Clang-cl compiler on Windows -build:win_clang --copt=/clang:-Weverything +build:win_clang_base --@com_google_protobuf//build_defs:use_dlls=True +build:win_clang_base --@com_google_absl//absl:use_dlls=True +build:win_clang_base --linkopt=/demangle:no --host_linkopt=/demangle:no +build:win_clang_base --linkopt=/errorlimit:0 --host_linkopt=/errorlimit:0 +build:win_clang_base --copt=/clang:-Weverything +build:win_clang_base --host_copt=/clang:-Weverything +build:win_clang_base --compiler=clang-cl +build:win_clang_base --linkopt=/FORCE:MULTIPLE +build:win_clang_base --host_linkopt=/FORCE:MULTIPLE +build:win_clang_base --action_env=PATHEXT=.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY;.PYW +test:win_clang_base --linkopt=/FORCE:MULTIPLE +test:win_clang_base --host_linkopt=/FORCE:MULTIPLE +test:win_clang_base --build_tests_only --keep_going --test_output=errors --verbose_failures=true --test_summary=short + +build:win_clang --config=win_clang_base build:win_clang --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl build:win_clang --extra_execution_platforms=//tensorflow/tools/toolchains/win:x64_windows-clang-cl build:win_clang --host_platform=//tensorflow/tools/toolchains/win:x64_windows-clang-cl -build:win_clang --compiler=clang-cl -build:win_clang --linkopt=/FORCE:MULTIPLE -build:win_clang --host_linkopt=/FORCE:MULTIPLE -test:win_clang --linkopt=/FORCE:MULTIPLE -test:win_clang --host_linkopt=/FORCE:MULTIPLE - -# Same config as above but for XLA, which has different toolchain paths -build:win_clang_xla --copt=/clang:-Weverything -build:win_clang_xla --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl -build:win_clang_xla --extra_execution_platforms=//tools/toolchains/win:x64_windows-clang-cl -build:win_clang_xla --host_platform=//tools/toolchains/win:x64_windows-clang-cl -build:win_clang_xla --compiler=clang-cl -build:win_clang_xla --linkopt=/FORCE:MULTIPLE -build:win_clang_xla --host_linkopt=/FORCE:MULTIPLE -test:win_clang_xla --action_env=PATHEXT=.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY;.PYW -test:win_clang_xla --linkopt=/FORCE:MULTIPLE -test:win_clang_xla --host_linkopt=/FORCE:MULTIPLE + +build:windows_x86_cpu_2022 --config=win_clang_base +build:windows_x86_cpu_2022 --crosstool_top="//tensorflow/tools/toolchains/win2022/20241118:toolchain" +build:windows_x86_cpu_2022 --extra_toolchains="//tensorflow/tools/toolchains/win2022/20241118:cc-toolchain-x64_windows-clang-cl" +build:windows_x86_cpu_2022 --extra_execution_platforms="//tensorflow/tools/toolchains/win2022:windows_ltsc2022_clang" +build:windows_x86_cpu_2022 --host_platform="//tensorflow/tools/toolchains/win2022:windows_ltsc2022_clang" +build:windows_x86_cpu_2022 --platforms="//tensorflow/tools/toolchains/win2022:windows_ltsc2022_clang" # Options to build TensorFlow 1.x or 2.x. 
# TODO(kanglan): Change v2's define to default behavior @@ -520,9 +604,9 @@ build:rbe_linux_cpu --crosstool_top="@local_config_cuda//crosstool:toolchain" build:rbe_linux_cpu --extra_toolchains="@local_config_cuda//crosstool:toolchain-linux-x86_64" build:rbe_linux_cpu --repo_env=CC="/usr/lib/llvm-18/bin/clang" build:rbe_linux_cpu --repo_env=TF_SYSROOT="/dt9" -build:rbe_linux_cpu --extra_execution_platforms="@sigbuild-r2.17-clang_config_platform//:platform" -build:rbe_linux_cpu --host_platform="@sigbuild-r2.17-clang_config_platform//:platform" -build:rbe_linux_cpu --platforms="@sigbuild-r2.17-clang_config_platform//:platform" +build:rbe_linux_cpu --extra_execution_platforms="@ml_build_config_platform//:platform" +build:rbe_linux_cpu --host_platform="@ml_build_config_platform//:platform" +build:rbe_linux_cpu --platforms="@ml_build_config_platform//:platform" # This is needed for all Clang17 builds but must not be present in GCC builds. build:rbe_linux_cpu --copt=-Wno-error=unused-command-line-argument # This was added in clang-16 by https://reviews.llvm.org/D133574. @@ -535,6 +619,12 @@ build:rbe_linux_cpu --python_path="/usr/bin/python3" # These you may need to change for your own GCP project. common:rbe_linux_cpu --remote_instance_name=projects/tensorflow-testing/instances/default_instance +# Download CUDA/CUDNN redistributions to preserve the repositories cache between +# CPU and GPU builds. +# TODO(ybaturina): Uncomment when RBE is ready to support this. +# build:rbe_linux_cpu --repo_env USE_CUDA_REDISTRIBUTIONS=1 +# build:rbe_linux_cpu --config=cuda_version + # TODO(kanglan): Remove it after toolchain update is complete. build:rbe_linux_cpu_old --config=rbe_linux build:rbe_linux_cpu_old --host_crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" @@ -548,11 +638,13 @@ common:rbe_linux_cpu_old --remote_instance_name=projects/tensorflow-testing/inst build:rbe_linux_cuda --config=cuda_clang_official build:rbe_linux_cuda --config=rbe_linux_cpu +build:rbe_linux_cuda --repo_env=USE_CUDA_TAR_ARCHIVE_FILES=1 +build:rbe_linux_cuda --repo_env=USE_NVSHMEM_TAR_ARCHIVE_FILES=1 # For Remote build execution -- GPU configuration build:rbe_linux_cuda --repo_env=REMOTE_GPU_TESTING=1 build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda -build:rbe_linux_cuda_nvcc --config=nvcc_clang +build:rbe_linux_cuda_nvcc --config=cuda_nvcc build:rbe_linux_cuda_nvcc --repo_env TF_NCCL_USE_STUB=1 build:rbe_win_base --config=rbe_base @@ -562,20 +654,11 @@ build:rbe_win_base --remote_instance_name=projects/tensorflow-testing/instances/ build:rbe_win_base --remote_download_minimal build:rbe_win_base --enable_runfiles build:rbe_win_base --nobuild_python_zip +# Don't build the runfile links in the RBE build which is expensive on Windows. 
+build:rbe_win_base --nobuild_runfile_links build:rbe_win_base --define=override_eigen_strong_inline=true -build:rbe_win_clang --config=rbe_win_base -build:rbe_win_clang --crosstool_top="//tensorflow/tools/toolchains/win/20240424:toolchain" -build:rbe_win_clang --extra_toolchains="//tensorflow/tools/toolchains/win/20240424:cc-toolchain-x64_windows-clang-cl" -build:rbe_win_clang --extra_execution_platforms="//tensorflow/tools/toolchains/win:x64_windows-clang-cl" -build:rbe_win_clang --host_platform="//tensorflow/tools/toolchains/win:x64_windows-clang-cl" -build:rbe_win_clang --platforms="//tensorflow/tools/toolchains/win:x64_windows-clang-cl" -build:rbe_win_clang --compiler=clang-cl -build:rbe_win_clang --linkopt=/FORCE:MULTIPLE -build:rbe_win_clang --host_linkopt=/FORCE:MULTIPLE - -# TODO(belitskiy): Rename `rbe_win_clang` to this, once done switching presubmits. -build:rbe_windows_x86_cpu --config=rbe_win_clang +build:rbe_windows_x86_cpu_2022 --config=rbe_win_base --config=windows_x86_cpu_2022 # END TF REMOTE BUILD EXECUTION OPTIONS @@ -584,15 +667,16 @@ build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain build:elinux_aarch64 --config=elinux build:elinux_aarch64 --cpu=aarch64 +build:elinux_aarch64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/linux:linux_aarch64 build:elinux_armhf --config=elinux build:elinux_armhf --cpu=armhf +build:elinux_armhf --platforms=@org_tensorflow//tensorflow/tools/toolchains/linux:linux_armhf build:elinux_armhf --copt -mfp16-format=ieee # Config-specific options should come above this line. # Load rc file written by ./configure. try-import %workspace%/.tf_configure.bazelrc -try-import %workspace%/xla_configure.bazelrc # Load rc file with user-specific options. try-import %workspace%/.bazelrc.user @@ -625,10 +709,6 @@ build:release_linux_base --linkopt="-lm" build:release_linux_base --linkopt=-Wl,--undefined-version # Container environment settings below this point. -# Use Python 3.X as installed in container image -build:release_linux_base --action_env PYTHON_BIN_PATH="/usr/bin/python3" -build:release_linux_base --action_env PYTHON_LIB_PATH="/usr/lib/tf_python" -build:release_linux_base --python_path="/usr/bin/python3" # Set Clang as compiler. Use the actual path to clang installed in container. build:release_linux_base --repo_env=CC="/usr/lib/llvm-18/bin/clang" build:release_linux_base --repo_env=BAZEL_COMPILER="/usr/lib/llvm-18/bin/clang" @@ -731,51 +811,52 @@ build:tf_public_macos_cache_push --config=tf_public_macos_cache --remote_upload_ # These are convenience config options that effectively declare TF's CI test suites. Look # at the scripts of ci/official/ to see how TF's CI uses them. -# LIBTENSORFLOW TESTS are for building Libtensorflow archives. These are CUDA/CPU-agnostic. -test:linux_libtensorflow_test --config=cuda_wheel -- //tensorflow/tools/lib_package:libtensorflow_test //tensorflow/tools/lib_package:libtensorflow_java_test -build:linux_libtensorflow_build --config=cuda_wheel -- //tensorflow/tools/lib_package:libtensorflow.tar.gz //tensorflow/tools/lib_package:libtensorflow_jni.tar.gz //tensorflow/java:libtensorflow.jar //tensorflow/java:libtensorflow-src.jar //tensorflow/tools/lib_package:libtensorflow_proto.zip - # PYTHON TESTS run a suite of Python tests intended for verifying that the Python wheel # will work properly. These are usually run Nightly or upon Release. 
# CPU WHEEL -test:linux_cpu_wheel_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310 -test:linux_cpu_wheel_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310 +test:linux_cpu_wheel_test_filters --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 +test:linux_cpu_wheel_test_filters --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 test:linux_cpu_wheel_test_filters --test_lang_filters=py --test_size_filters=small,medium -test:linux_cpu_wheel_test --config=linux_cpu_wheel_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... +test:linux_cpu_wheel_test --@local_xla//third_party/py:wheel_dependency=true --config=linux_cpu_wheel_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... # CUDA WHEEL -test:linux_cuda_wheel_test_filters --test_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310 -test:linux_cuda_wheel_test_filters --build_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310 +test:linux_cuda_wheel_test_filters --test_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 +test:linux_cuda_wheel_test_filters --build_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 test:linux_cuda_wheel_test_filters --test_lang_filters=py --test_size_filters=small,medium -test:linux_cuda_wheel_test --config=linux_cuda_wheel_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... +test:linux_cuda_wheel_test --@local_xla//third_party/py:wheel_dependency=true --config=linux_cuda_wheel_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_gpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... 
# ARM64 WHEEL -test:linux_arm64_wheel_test_filters --test_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310 -test:linux_arm64_wheel_test_filters --build_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310 +test:linux_arm64_wheel_test_filters --test_tag_filters=-no_oss,-tf_tosa,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 +test:linux_arm64_wheel_test_filters --build_tag_filters=-no_oss,-tf_tosa,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313 test:linux_arm64_wheel_test_filters --test_lang_filters=py --test_size_filters=small,medium -test:linux_arm64_wheel_test --config=linux_arm64_wheel_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/core/grappler/optimizers:auto_mixed_precision_test_cpu -//tensorflow/core/grappler/optimizers:remapper_test_cpu -//tensorflow/core/kernels/image:resize_bicubic_op_test +test:linux_arm64_wheel_test --@local_xla//third_party/py:wheel_dependency=true --config=linux_arm64_wheel_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/core/grappler/optimizers:auto_mixed_precision_test_cpu -//tensorflow/core/grappler/optimizers:remapper_test_cpu -//tensorflow/core/kernels/image:resize_bicubic_op_test # MACOS ARM64 WHEEL -test:macos_arm64_wheel_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 -test:macos_arm64_wheel_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 +test:macos_arm64_wheel_test_filters --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 +test:macos_arm64_wheel_test_filters --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 test:macos_arm64_wheel_test_filters --test_lang_filters=py --test_size_filters=small,medium -test:macos_arm64_wheel_test --config=macos_arm64_wheel_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... +test:macos_arm64_wheel_test --@local_xla//third_party/py:wheel_dependency=true --config=macos_arm64_wheel_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... 
# MACOS X86 WHEEL -test:macos_x86_wheel_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test -test:macos_x86_wheel_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test +test:macos_x86_wheel_test_filters --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test +test:macos_x86_wheel_test_filters --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test test:macos_x86_wheel_test_filters --test_lang_filters=py --test_size_filters=small,medium -test:macos_x86_wheel_test --config=macos_x86_wheel_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... +test:macos_x86_wheel_test --@local_xla//third_party/py:wheel_dependency=true --config=macos_x86_wheel_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... +# WINDOWS X86 WHEEL +test:windows_x86_cpu_2022_wheel_test_filters --test_tag_filters=-no_windows,-windows_excluded,-no_oss,-oss_excluded,-gpu,-tpu,-benchmark-test,-v1only +test:windows_x86_cpu_2022_wheel_test_filters --build_tag_filters=-no_windows,-windows_excluded,-no_oss,-oss_excluded,-benchmark-test,-v1only +test:windows_x86_cpu_2022_wheel_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium --test_timeout="300,450,1200,3600" +test:windows_x86_cpu_2022_wheel_test --build_tests_only --config=windows_x86_cpu_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:prebuilt_wheel_import_api_packages_test_cpu -//tensorflow/java/... -//tensorflow/lite/... -//tensorflow/compiler/... # PYCPP TESTS run a suite of Python and C++ tests to verify general correctness over # the whole TF code base. These are usually run continuously or upon presubmit. # LINUX CPU PYCPP: -test:linux_cpu_pycpp_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only -test:linux_cpu_pycpp_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only +test:linux_cpu_pycpp_test_filters --test_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only +test:linux_cpu_pycpp_test_filters --build_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only test:linux_cpu_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium -test:linux_cpu_pycpp_test --config=linux_cpu_pycpp_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... +test:linux_cpu_pycpp_test --config=linux_cpu_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... 
-//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... # LINUX CUDA PYCPP: -test:linux_cuda_pycpp_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-v1only,gpu,-no_gpu,-no_gpu_presubmit,-no_cuda11 -test:linux_cuda_pycpp_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-v1only,gpu,-no_gpu,-no_gpu_presubmit,-no_cuda11 +test:linux_cuda_pycpp_test_filters --test_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-oss_excluded,-oss_serial,-benchmark-test,-v1only,gpu,-no_gpu,-no_gpu_presubmit,-no_cuda11 +test:linux_cuda_pycpp_test_filters --build_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-oss_excluded,-oss_serial,-benchmark-test,-v1only,gpu,-no_gpu,-no_gpu_presubmit,-no_cuda11 test:linux_cuda_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium -test:linux_cuda_pycpp_test --config=linux_cuda_pycpp_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... +test:linux_cuda_pycpp_test --config=linux_cuda_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_gpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... # LINUX ARM64 PYCPP # In Linux Arm64 presubmit/continuous build, we cross-compile the binaries on @@ -786,36 +867,37 @@ test:linux_cuda_pycpp_test --config=linux_cuda_pycpp_test_filters -- //tensorflo # do not run them. By prefixing the configs with "build", we can run both # `bazel build` and `bazel test` commands with the same config as test configs # inherit from build. -build:linux_arm64_pycpp_test_filters --test_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only -build:linux_arm64_pycpp_test_filters --build_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only +build:linux_arm64_pycpp_test_filters --test_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only +build:linux_arm64_pycpp_test_filters --build_tag_filters=-no_oss,-no_oss_py313,-tf_tosa,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only build:linux_arm64_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium --flaky_test_attempts=3 # TODO(michaelhudgins): Why do we need to specifically omit go and java here? -build:linux_arm64_pycpp_test --config=linux_arm64_pycpp_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/core/grappler/optimizers:auto_mixed_precision_test_cpu -//tensorflow/core/grappler/optimizers:remapper_test_cpu -//tensorflow/core/kernels/image:resize_bicubic_op_test -//tensorflow/python/tools:aot_compiled_test +build:linux_arm64_pycpp_test --config=linux_arm64_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... 
-//tensorflow/core/grappler/optimizers:auto_mixed_precision_test_cpu -//tensorflow/core/grappler/optimizers:remapper_test_cpu -//tensorflow/core/kernels/image:resize_bicubic_op_test -//tensorflow/python/tools:aot_compiled_test # CROSS-COMPILE ARM64 PYCPP build:cross_compile_linux_arm64_pycpp_test --config=linux_arm64_pycpp_test # Tests that fail only when cross-compiled build:cross_compile_linux_arm64_pycpp_test -//tensorflow/compiler/mlir/quantization/stablehlo:convert_tf_quant_to_mhlo_int_test # MACOS ARM64 PYCPP -test:macos_arm64_pycpp_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 -test:macos_arm64_pycpp_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 +test:macos_arm64_pycpp_test_filters --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 +test:macos_arm64_pycpp_test_filters --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py39,-no_oss_py310,-no_oss_py313,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test,-no_mac_arm64,-no_aarch64 test:macos_arm64_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium -test:macos_arm64_pycpp_test --config=macos_arm64_pycpp_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... -//tensorflow/core/kernels/image:resize_bicubic_op_test +test:macos_arm64_pycpp_test --config=macos_arm64_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/compiler/aot/... -//tensorflow/core/kernels/image:resize_bicubic_op_test # MACOS X86 PYCPP # These are defined as build configs so that we can run a build only job. See # the note under "ARM64 PYCPP" for more details. -build:macos_x86_pycpp_test_filters --test_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test -build:macos_x86_pycpp_test_filters --build_tag_filters=-no_oss,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test +build:macos_x86_pycpp_test_filters --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test +build:macos_x86_pycpp_test_filters --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-no_oss_py38,-no_oss_py39,-no_oss_py310,-nomac,-no_mac,-mac_excluded,-v1only,-gpu,-tpu,-benchmark-test build:macos_x86_pycpp_test_filters --keep_going --test_lang_filters=cc,py --test_size_filters=small,medium -build:macos_x86_pycpp_test --config=macos_x86_pycpp_test_filters -- //tensorflow/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/tools/toolchains/... -//tensorflow/lite/... 
-//tensorflow/compiler/aot/... +build:macos_x86_pycpp_test --config=macos_x86_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_cpu -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/tools/toolchains/... -//tensorflow/lite/... -//tensorflow/compiler/aot/... # CROSS-COMPILE MACOS X86 PYCPP build:cross_compile_macos_x86_pycpp_test --config=macos_x86_pycpp_test build:cross_compile_macos_x86_pycpp_test -//tensorflow/core/kernels:quantized_conv_ops_test -//tensorflow/core/kernels:quantized_matmul_op_test -//tensorflow/python/ops:quantized_conv_ops_test -//tensorflow/tools/graph_transforms:transforms_test -//tensorflow/python/tools:aot_compiled_test # WINDOWS X86-64 CPU PYCPP -test:windows_x86_cpu_pycpp_test_filters --test_tag_filters=-no_windows,-windows_excluded,-no_oss,-oss_excluded,-gpu,-tpu,-benchmark-test -test:windows_x86_cpu_pycpp_test_filters --build_tag_filters=-no_windows,-windows_excluded,-no_oss,-oss_excluded,-benchmark-test -test:windows_x86_cpu_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium --test_timeout="300,450,1200,3600" -test:windows_x86_cpu_pycpp_test_opts --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions --dynamic_mode=off --build_tests_only -test:windows_x86_cpu_pycpp_test --config=windows_x86_cpu_pycpp_test_opts --config=windows_x86_cpu_pycpp_test_filters -- //tensorflow/... -//tensorflow/java/... -//tensorflow/lite/... -//tensorflow/compiler/... +build:windows_x86_cpu_2022_pycpp_test_build_opts --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions --dynamic_mode=off +test:windows_x86_cpu_2022_pycpp_test_filters --test_tag_filters=-no_windows,-windows_excluded,-no_oss,-tf_tosa,-oss_excluded,-gpu,-tpu,-benchmark-test,-v1only +build:windows_x86_cpu_2022_pycpp_test_filters --build_tag_filters=-no_windows,-windows_excluded,-no_oss,-tf_tosa,-oss_excluded,-benchmark-test,-v1only +test:windows_x86_cpu_2022_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium --test_timeout="300,450,1200,3600" +test:windows_x86_cpu_2022_pycpp_test_opts --config=windows_x86_cpu_2022_pycpp_test_build_opts --build_tests_only +test:windows_x86_cpu_2022_pycpp_test --config=windows_x86_cpu_2022_pycpp_test_opts --config=windows_x86_cpu_2022_pycpp_test_filters -- //tensorflow/... //tensorflow/tools/pip_package:import_api_packages_test_cpu -//tensorflow/java/... -//tensorflow/lite/... -//tensorflow/compiler/... # END TF TEST SUITE OPTIONS @@ -829,38 +911,15 @@ build:cross_compile_base --host_cpu=k8 build:cross_compile_base --host_crosstool_top=//tensorflow/tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite build:cross_compile_base --extra_execution_platforms=//tensorflow/tools/toolchains/cross_compile/config:linux_x86_64 -# XLA related settings for cross-compiled build. Certain paths are -# different in the XLA repo. -build:cross_compile_base_xla --host_cpu=k8 -build:cross_compile_base_xla --host_crosstool_top=//tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite -build:cross_compile_base_xla --extra_execution_platforms=//tools/toolchains/cross_compile/config:linux_x86_64 - build:rbe_cross_compile_base --config=rbe_base build:rbe_cross_compile_base --remote_instance_name=projects/tensorflow-testing/instances/default_instance -# XLA depends on some local Python headers that are configured as Genrule. 
They -# are present on the local host machine but not on the remote execution machine, -# leading to build failures. To resolve the issue, the following line is added -# to make sure all Genrule targets are excuted locally. -build:rbe_cross_compile_base_xla --config=rbe_cross_compile_base -build:rbe_cross_compile_base_xla --strategy=Genrule=standalone - -# Due to the above strategy, all Genrule commands are executed locally, but the -# following actions invoke tools (E.g `flatc`, `llvm-tblgen`, etc.) that are -# only executabe on the RBE (x86) machine, so the strategy_regexp options are -# added to override and run the actions using remote strategy. -build:rbe_cross_compile_base_xla --strategy_regexp='Generating code from table.*=remote' -build:rbe_cross_compile_base_xla --strategy_regexp='Generating flatbuffer files.*=remote' -build:rbe_cross_compile_base_xla --strategy_regexp='Executing genrule @llvm-project.*=remote' - # Test-related settings below this point # We cannot run cross-compiled tests on the remote Linux x86 VMs so we need to # force all tests to run locally on the Aarch64 host. test:rbe_cross_compile_base --strategy=TestRunner=local --build_tests_only test:rbe_cross_compile_base --verbose_failures=true --local_test_jobs=HOST_CPUS --test_output=errors -test:rbe_cross_compile_base_xla --config=rbe_cross_compile_base - # START LINUX AARCH64 CROSS-COMPILE CONFIGS build:cross_compile_linux_arm64 --config=cross_compile_base @@ -869,21 +928,11 @@ build:cross_compile_linux_arm64 --platforms=//tensorflow/tools/toolchains/cross_ build:cross_compile_linux_arm64 --cpu=aarch64 build:cross_compile_linux_arm64 --crosstool_top=//tensorflow/tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite -# XLA uses different paths for platforms and crosstool_top. 
-build:cross_compile_linux_arm64_xla --config=cross_compile_base_xla -build:cross_compile_linux_arm64_xla --platforms=//tools/toolchains/cross_compile/config:linux_aarch64 -build:cross_compile_linux_arm64_xla --crosstool_top=//tools/toolchains/cross_compile/cc:cross_compile_toolchain_suite - # RBE cross-compile configs for Linux Aarch64 build:rbe_cross_compile_linux_arm64 --config=cross_compile_linux_arm64 build:rbe_cross_compile_linux_arm64 --config=rbe_cross_compile_base test:rbe_cross_compile_linux_arm64 --config=rbe_cross_compile_base -# RBE cross-compile configs for XLA Linux Aarch64 -build:rbe_cross_compile_linux_arm64_xla --config=cross_compile_linux_arm64_xla -build:rbe_cross_compile_linux_arm64_xla --config=rbe_cross_compile_base_xla -test:rbe_cross_compile_linux_arm64_xla --config=rbe_cross_compile_base_xla - # END LINUX AARCH64 CROSS-COMPILE CONFIGS # START MACOS CROSS-COMPILE CONFIGS @@ -919,5 +968,7 @@ test:rbe_cross_compile_macos_x86 --jobs=100 # END MACOS CROSS-COMPILE CONFIGS # END CROSS-COMPILE CONFIGS -# Try to load the XLA warnings config if available -try-import %workspace%/warnings.bazelrc \ No newline at end of file +# Enable Java 21 language features +common --java_runtime_version=remotejdk_21 + +# LINT.ThenChange(//xla/tensorflow.bazelrc) \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-platform/pom.xml b/tensorflow-core/tensorflow-core-platform/pom.xml index 181b1089b8d..ca6a014f0ae 100644 --- a/tensorflow-core/tensorflow-core-platform/pom.xml +++ b/tensorflow-core/tensorflow-core-platform/pom.xml @@ -55,12 +55,6 @@ ${project.version} macosx-arm64 - - org.tensorflow - tensorflow-core-native - ${project.version} - windows-x86_64 - @@ -73,7 +67,7 @@ - tensorflow-core-api.jar tensorflow-core-native.jar tensorflow-core-native-linux-x86_64.jar tensorflow-core-native-macosx-arm64.jar tensorflow-core-native-windows-x86_64.jar tensorflow-core-native-linux-arm64.jar + tensorflow-core-api.jar tensorflow-core-native.jar tensorflow-core-native-linux-x86_64.jar tensorflow-core-native-macosx-arm64.jar tensorflow-core-native-linux-arm64.jar ${java.module.name}
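For anyone rebuilding the native binaries locally after this change, a minimal sketch of the remaining linux-x86_64 flow, based on the Maven invocations visible in build.yml above. The working directory (repository root), JDK 17 setup, the empty GPU extension, and the REPOSITORY_URL placeholder are assumptions, not part of this patch; CI-only details such as deploy credentials are omitted.

# Build every module except tensorflow-framework for one platform, as the
# linux-x86_64 job in build.yml does; the CUDA variant of that platform
# would pass -Djavacpp.platform.extension=-gpu instead of an empty value.
mvn clean install -pl '!tensorflow-framework' -B -U -e \
    -Djavacpp.platform=linux-x86_64 \
    -Djavacpp.platform.extension=

# Optionally redeploy only the native artifact, mirroring the
# "Deploy native artifact" step; REPOSITORY_URL is a placeholder for the
# target Maven repository used in place of the CI-provided repositoryUrl.
mvn -f tensorflow-core/tensorflow-core-native/pom.xml deploy:deploy-file@native-only -B -e \
    -Djavacpp.platform=linux-x86_64 \
    -Djavacpp.platform.extension= \
    -Durl="$REPOSITORY_URL"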