Skip to content

Commit

Permalink
Merge pull request #2543 from ROCm/develop-upstream-sync-20240513
Browse files Browse the repository at this point in the history
Develop upstream sync 20240513
  • Loading branch information
i-chaochen authored May 20, 2024
2 parents 1fedf22 + cfe441c commit f802114
Show file tree
Hide file tree
Showing 2,977 changed files with 135,098 additions and 72,055 deletions.
73 changes: 35 additions & 38 deletions .bazelrc
Original file line number Diff line number Diff line change
Expand Up @@ -51,16 +51,13 @@
# Remote build execution options (only configured to work with TF team projects for now).
# rbe_base: General RBE options shared by all flavors.
# rbe_linux: General RBE options used on all linux builds.
# rbe_win: General RBE options used on all windows builds.
# rbe_win_base: General RBE options used on all Windows builds. Not to be used standalone.
# rbe_win_clang: Options specific to compiling using Clang.
#
# rbe_linux_cpu: RBE options to build with only CPU support.
# rbe_linux_cuda: RBE options to build with GPU support using clang.
# rbe_linux_cuda_nvcc: RBE options to build with GPU support using nvcc.
#
# rbe_win_py39: Windows Python 3.9 RBE config
#
# tensorflow_testing_rbe_win: RBE options to use RBE with tensorflow-testing project on windows
#
# Embedded Linux options (experimental; so far only tested with the TFLite build)
# elinux: General Embedded Linux options shared by all flavors.
# elinux_aarch64: Embedded Linux options for aarch64 (ARM64) CPU support.
Expand Down Expand Up @@ -473,6 +470,17 @@ build:win_clang --host_linkopt=/FORCE:MULTIPLE
test:win_clang --linkopt=/FORCE:MULTIPLE
test:win_clang --host_linkopt=/FORCE:MULTIPLE

# Same config as above but for XLA, which has different toolchain paths
build:win_clang_xla --copt=/clang:-Weverything
build:win_clang_xla --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl
build:win_clang_xla --extra_execution_platforms=//tools/toolchains/win:x64_windows-clang-cl
build:win_clang_xla --host_platform=//tools/toolchains/win:x64_windows-clang-cl
build:win_clang_xla --compiler=clang-cl
build:win_clang_xla --linkopt=/FORCE:MULTIPLE
build:win_clang_xla --host_linkopt=/FORCE:MULTIPLE
test:win_clang_xla --linkopt=/FORCE:MULTIPLE
test:win_clang_xla --host_linkopt=/FORCE:MULTIPLE

# Options to build TensorFlow 1.x or 2.x.
# TODO(kanglan): Change v2's define to default behavior
build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
Expand Down Expand Up @@ -581,38 +589,25 @@ build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda
build:rbe_linux_cuda_nvcc --config=nvcc_clang
build:rbe_linux_cuda_nvcc --repo_env TF_NCCL_USE_STUB=1

# TODO(kanglan): Remove rbe_win and rbe_win_py3* after b/289091160 is fixed
build:rbe_win --config=rbe_base
build:rbe_win --crosstool_top="//tensorflow/tools/toolchains/win/tf_win_05022023:toolchain"
build:rbe_win --extra_toolchains="//tensorflow/tools/toolchains/win/tf_win_05022023:cc-toolchain-x64_windows"
build:rbe_win --extra_execution_platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --host_platform="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_win --experimental_strict_action_env=true

# TODO(gunan): Remove once we use MSVC 2019 with latest patches.
build:rbe_win --define=override_eigen_strong_inline=true

build:rbe_win_base --config=rbe_base
build:rbe_win_base --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_win_base --remote_instance_name=projects/tensorflow-testing/instances/windows
# Don't build the python zip archive in the RBE build.
build:rbe_win --remote_download_minimal
build:rbe_win --enable_runfiles
build:rbe_win --nobuild_python_zip

build:rbe_win_py38 --config=rbe_base
build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe
build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages
build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=//tensorflow/tools/toolchains/win_1803/py38
build:rbe_win_py38 --python_path=C:\\Python38\\python.exe

build:rbe_win_py39 --config=rbe_base
build:rbe_win_py39 --repo_env=PYTHON_BIN_PATH=C:\\Python39\\python.exe
build:rbe_win_py39 --repo_env=PYTHON_LIB_PATH=C:\\Python39\\lib\\site-packages
build:rbe_win_py39 --repo_env=TF_PYTHON_CONFIG_REPO=//tensorflow/tools/toolchains/win_1803/py39
build:rbe_win_py39 --python_path=C:\\Python39\\python.exe

# TODO(kanglan): Merge tensorflow_testing_rbe_win into rbe_win
common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows
build:rbe_win_base --remote_download_minimal
build:rbe_win_base --enable_runfiles
build:rbe_win_base --nobuild_python_zip
build:rbe_win_base --define=override_eigen_strong_inline=true

build:rbe_win_clang --config=rbe_win_base
build:rbe_win_clang --crosstool_top="//tensorflow/tools/toolchains/win/20240424:toolchain"
build:rbe_win_clang --extra_toolchains="//tensorflow/tools/toolchains/win/20240424:cc-toolchain-x64_windows-clang-cl"
build:rbe_win_clang --extra_execution_platforms="//tensorflow/tools/toolchains/win:x64_windows-clang-cl"
build:rbe_win_clang --host_platform="//tensorflow/tools/toolchains/win:x64_windows-clang-cl"
build:rbe_win_clang --platforms="//tensorflow/tools/toolchains/win:x64_windows-clang-cl"
build:rbe_win_clang --compiler=clang-cl
build:rbe_win_clang --linkopt=/FORCE:MULTIPLE
build:rbe_win_clang --host_linkopt=/FORCE:MULTIPLE

# END TF REMOTE BUILD EXECUTION OPTIONS

# TFLite build configs for generic embedded Linux
Expand Down Expand Up @@ -855,7 +850,7 @@ test:linux_cuda_pycpp_test --config=linux_cuda_pycpp_test_filters -- //tensorflo
build:linux_arm64_pycpp_test_filters --test_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only
build:linux_arm64_pycpp_test_filters --build_tag_filters=-no_oss,-no_aarch64,-oss_excluded,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only
build:linux_arm64_pycpp_test_filters --test_lang_filters=cc,py --test_size_filters=small,medium --flaky_test_attempts=3
# TODO(michaelhudgins): Why do we need to specifically omit go and java here?
# TODO(michaelhudgins): Why do we need to specifically omit go and java here?
build:linux_arm64_pycpp_test --config=linux_arm64_pycpp_test_filters -- //tensorflow/... -//tensorflow/python/integration_testing/... -//tensorflow/compiler/tf2tensorrt/... -//tensorflow/core/tpu/... -//tensorflow/lite/... -//tensorflow/tools/toolchains/... -//tensorflow/go/... -//tensorflow/java/... -//tensorflow/core/grappler/optimizers:auto_mixed_precision_test_cpu -//tensorflow/core/grappler/optimizers:remapper_test_cpu -//tensorflow/core/kernels/image:resize_bicubic_op_test -//tensorflow/compiler/mlir/tfr/examples/customization:test_ops_test -//tensorflow/compiler/mlir/tfr/examples/mnist:mnist_ops_test -//tensorflow/compiler/mlir/tfr/examples/pad:pad_ops_test -//tensorflow/python/tools:aot_compiled_test
# CROSS-COMPILE ARM64 PYCPP
build:cross_compile_linux_arm64_pycpp_test --config=linux_arm64_pycpp_test
Expand Down Expand Up @@ -964,7 +959,9 @@ build:cross_compile_macos_x86 --extra_toolchains=//tensorflow/tools/toolchains/c
build:cross_compile_macos_x86 --platform_mappings=tensorflow/tools/toolchains/cross_compile/config/platform_mappings

# RBE cross-compile configs for Darwin x86
build:rbe_cross_compile_macos_x86 --config=cross_compile_macos_x86
build:rbe_cross_compile_macos_x86 --config=cross_compile_macos_x86 --remote_download_minimal
build:rbe_cross_compile_macos_x86 --bes_backend="" --bes_results_url="" --bes_timeout="0s"
build:rbe_cross_compile_macos_x86 --experimental_remote_build_event_upload="minimal"
build:rbe_cross_compile_macos_x86 --config=rbe_cross_compile_base
build:rbe_cross_compile_macos_x86 --bes_upload_mode=nowait_for_upload_complete
test:rbe_cross_compile_macos_x86 --config=rbe_cross_compile_base
Expand Down
7 changes: 7 additions & 0 deletions RELEASE.md
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,13 @@
it's finished. The default is `False` for backward compatibility. Users of
`distributed_save` are recommended to set it to `True`.

* `tf.tpu.experimental.embedding.TPUEmbeddingV2`
  * Add `compute_sparse_core_stats` for sparse-core users to profile their
    data with this API and obtain the `max_ids` and `max_unique_ids` values.
    These numbers are needed to configure the sparse-core embedding mid-level
    API.
* Remove the `preprocess_features` method since that's no longer needed.

## Thanks to our Contributors

This release contains contributions from many people at Google, as well as:
Expand Down
4 changes: 4 additions & 0 deletions tensorflow/c/c_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ limitations under the License.
#include <time.h>
#include <unistd.h>

#ifdef _WIN32
#include <process.h>
#endif

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/env.h"
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/c/eager/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -296,8 +296,8 @@ TFE_TensorHandle* TFE_NewTensorHandle(const TF_Tensor* t, TF_Status* status) {
void TFE_DeleteTensorHandle(TFE_TensorHandle* h) {
if (h == nullptr) return;

tensorflow::profiler::TraceMe activity(
"TFE_DeleteTensorHandle", tensorflow::profiler::TraceMeLevel::kInfo);
tsl::profiler::TraceMe activity("TFE_DeleteTensorHandle",
tsl::profiler::TraceMeLevel::kInfo);
if (h) {
tensorflow::unwrap(h)->Unref();
}
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/c/eager/c_api_unified_experimental.cc
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name,
Status status =
unwrap(op)->SetAttrType(attr_name, static_cast<DataType>(value));
TF_SetStatus(s, static_cast<TF_Code>(status.code()),
tsl::NullTerminatedMessage(status));
absl::StatusMessageAsCStr(status));
}

void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
Expand Down
5 changes: 1 addition & 4 deletions tensorflow/c/experimental/filesystem/plugins/windows/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,5 @@ cc_library(
"nobuilder",
"notap",
],
deps = [
"//tensorflow/c:tf_status",
"//tensorflow/c/experimental/filesystem:filesystem_interface",
],
deps = ["//tensorflow/c/experimental/filesystem:filesystem_interface"],
)
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@ limitations under the License.
#include <string.h>

#include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
#include "tensorflow/c/tf_status.h"

// Implementation of a filesystem for POSIX environments.
// This filesystem will support `file://` and empty (local) URI schemes.
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/c/experimental/next_pluggable_device/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ void TF_LookupOrCreatePluginResource(
void* opaque_plugin_resource = create_func(create_func_args);
*new_resource = new tensorflow::PluginResource(
opaque_plugin_resource, plugin_resource_name, delete_func);
return tensorflow::OkStatus();
return absl::OkStatus();
});

if (cc_status.ok()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,

absl::StatusOr<xla::PjRtCApiClient*> GetPjRtCApiClient(
const DeviceType& device_type) {
TF_ASSIGN_OR_RETURN(tsl::StatusOr<xla::PjRtClient*> pjrt_client,
TF_ASSIGN_OR_RETURN(absl::StatusOr<xla::PjRtClient*> pjrt_client,
tensorflow::GetPjRtClient(device_type));
auto* pjrt_c_api_client = dynamic_cast<xla::PjRtCApiClient*>(*pjrt_client);
if (pjrt_c_api_client == nullptr) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ limitations under the License.

namespace tensorflow {

StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor);
absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor);

absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,
xla::PjRtCApiClient* c_api_client,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ Status CreateUninitializedResourceVariable(ImmediateExecutionContext* ctx,

// Note that if shape is unknown rank, shape.dim_sizes() will be empty, and
// shape.dims() will be -1.
gtl::InlinedVector<int64_t, 4> dim_sizes = shape.dim_sizes();
absl::InlinedVector<int64_t, 4UL> dim_sizes = shape.dim_sizes();
TF_RETURN_IF_ERROR(varhandle_op->SetAttrShape(
"shape", reinterpret_cast<const int64_t*>(dim_sizes.data()),
shape.dims()));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -506,12 +506,11 @@ TEST_P(CSavedModelAPITest, LoadSavedModelWithUninitializedVariable) {
tensorflow::down_cast<tensorflow::TFSavedModelAPI*>(
tensorflow::unwrap(saved_model));
tensorflow::Variable* uninitialized_variable;
ASSERT_EQ(::tensorflow::OkStatus(),
model_api->GetVariable("uninitialized_variable",
&uninitialized_variable));
ASSERT_EQ(absl::OkStatus(), model_api->GetVariable("uninitialized_variable",
&uninitialized_variable));
ASSERT_EQ(tensorflow::DT_FLOAT, uninitialized_variable->dtype());

ASSERT_EQ(::tensorflow::OkStatus(),
ASSERT_EQ(absl::OkStatus(),
model_api->GetVariable("sub_module.uninitialized_variable",
&uninitialized_variable));
ASSERT_EQ(tensorflow::DT_INT64, uninitialized_variable->dtype());
Expand Down
24 changes: 14 additions & 10 deletions tensorflow/c/experimental/stream_executor/stream_executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -200,14 +200,16 @@ void HostCallbackTrampoline(void* ctx, TF_Status* status) {
delete host_ctx;
}

class CStreamExecutor : public StreamExecutorInterface {
class CStreamExecutor : public StreamExecutor {
public:
explicit CStreamExecutor(SP_Device device, SP_DeviceFns* device_fns,
explicit CStreamExecutor(Platform* se_platform, SP_Device device,
SP_DeviceFns* device_fns,
SP_StreamExecutor* stream_executor,
SP_Platform* platform, SP_PlatformFns* platform_fns,
SP_TimerFns* timer_fns, const std::string& name,
int visible_device_count)
: device_(std::move(device)),
: StreamExecutor(se_platform),
device_(std::move(device)),
device_fns_(device_fns),
stream_executor_(stream_executor),
platform_(platform),
Expand Down Expand Up @@ -563,9 +565,13 @@ class CStreamExecutor : public StreamExecutorInterface {
return std::unique_ptr<EventInterface>(
new CEvent(&device_, stream_executor_));
}
std::unique_ptr<StreamInterface> GetStreamImplementation() override {
return std::unique_ptr<StreamInterface>(
new CStream(&device_, stream_executor_));
absl::StatusOr<std::unique_ptr<Stream>> CreateStream(
std::optional<std::variant<StreamPriority, int>> priority =
std::nullopt) override {
auto stream = std::make_unique<Stream>(
this, std::make_unique<CStream>(&device_, stream_executor_));
TF_RETURN_IF_ERROR(stream->Initialize(priority));
return std::move(stream);
}

private:
Expand Down Expand Up @@ -644,11 +650,9 @@ absl::StatusOr<std::unique_ptr<StreamExecutor>> CPlatform::GetUncachedExecutor(
c_status.get());
TF_RETURN_IF_ERROR(StatusFromTF_Status(c_status.get()));

auto executor = std::make_unique<CStreamExecutor>(
std::move(device), &device_fns_, &stream_executor_, &platform_,
return std::make_unique<CStreamExecutor>(
this, std::move(device), &device_fns_, &stream_executor_, &platform_,
&platform_fns_, &timer_fns_, name_, visible_device_count);
auto result = std::make_unique<StreamExecutor>(this, std::move(executor));
return result;
}

absl::Status InitStreamExecutorPlugin(void* dso_handle,
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/c/kernels_experimental.cc
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ void TF_AssignUpdateVariable(TF_OpKernelContext* ctx, int input_index,
Status status =
LookupResource(context, HandleFromInput(context, input_index), &variable);
if (!status.ok()) {
printf("Failed with error: %s\n", tsl::NullTerminatedMessage(status));
printf("Failed with error: %s\n", absl::StatusMessageAsCStr(status));
abort();
}
const Tensor& value = context->input(value_index);
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/c/tf_status_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ namespace tsl {
void Set_TF_Status_from_Status(TF_Status* tf_status,
const absl::Status& status) {
TF_SetStatus(tf_status, TSLCodeFromStatusCode(status.code()),
tsl::NullTerminatedMessage(status));
absl::StatusMessageAsCStr(status));
status.ForEachPayload(
[tf_status](absl::string_view key, const absl::Cord& value) {
std::string key_str(key);
Expand Down
4 changes: 4 additions & 0 deletions tensorflow/cc/experimental/libtf/impl/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ tf_cc_test(
":scalars",
":string",
":tensor_spec",
"//tensorflow/core:framework",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
],
Expand Down Expand Up @@ -123,6 +125,8 @@ tf_cc_test(
deps = [
":iostream", # Necessary for absl::VerifyTypeImplementsAbslHashCorrectly.
":tensor_spec",
"//tensorflow/core:framework",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
"@com_google_absl//absl/hash:hash_testing",
Expand Down
2 changes: 2 additions & 0 deletions tensorflow/cc/experimental/libtf/impl/iostream_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/cc/experimental/libtf/impl/scalars.h"
#include "tensorflow/cc/experimental/libtf/impl/string.h"
#include "tensorflow/cc/experimental/libtf/impl/tensor_spec.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"

namespace tf {
Expand Down
2 changes: 2 additions & 0 deletions tensorflow/cc/experimental/libtf/impl/tensor_spec_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/cc/experimental/libtf/impl/tensor_spec.h"

#include "absl/hash/hash_testing.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"

namespace tf {
Expand Down
4 changes: 1 addition & 3 deletions tensorflow/cc/saved_model/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -544,9 +544,7 @@ cc_library(
name = "fingerprinting_utils",
srcs = ["fingerprinting_utils.cc"],
hdrs = ["fingerprinting_utils.h"],
visibility = [
"//tensorflow:__pkg__",
],
visibility = ["//visibility:private"],
deps = [
":constants",
"//tensorflow/core:lib",
Expand Down
2 changes: 0 additions & 2 deletions tensorflow/compiler/aot/codegen.cc
Original file line number Diff line number Diff line change
Expand Up @@ -668,7 +668,6 @@ class {{CLASS}} final : public tensorflow::XlaCompiledCpuFunction {
set_static_data_program_shape(data, StaticProgramShape());
set_static_data_hlo_profile_printer_data(
data, StaticHloProfilePrinterData());
set_static_data_use_xla_runtime(data, {{USE_XLA_RUNTIME}});
{{ASSIGN_PROFILE_COUNTERS_SIZE}}
return data;
}();
Expand Down Expand Up @@ -822,7 +821,6 @@ class {{CLASS}} final : public tensorflow::XlaCompiledCpuFunction {
{"{{DECLS_FROM_OBJ_FILE}}",
absl::StrJoin(metadata_result.header_variable_decls, "\n")},
{"{{ENTRY}}", compile_result.entry_point},
{"{{USE_XLA_RUNTIME}}", opts.use_xla_runtime ? "true" : "false"},
{"{{HLO_PROFILE_PRINTER_DATA_SHIM_EXPRESSION}}",
metadata_result.hlo_profile_printer_data_access_shim},
{"{{INCLUDE_XLA_DATA_PROTO}}", include_xla_data_proto},
Expand Down
1 change: 0 additions & 1 deletion tensorflow/compiler/aot/codegen_test_h.golden
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,6 @@ class MyClass final : public tensorflow::XlaCompiledCpuFunction {
set_static_data_program_shape(data, StaticProgramShape());
set_static_data_hlo_profile_printer_data(
data, StaticHloProfilePrinterData());
set_static_data_use_xla_runtime(data, false);

return data;
}();
Expand Down
2 changes: 0 additions & 2 deletions tensorflow/compiler/aot/tfcompile.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -319,8 +319,6 @@ def _tf_library(
] or []) + (include_standard_runtime_deps and [
# TODO(cwhipkey): only depend on kernel code that the model actually
# needed.
"@local_xla//xla/service/cpu/runtime:convolution_ffi",
"@local_xla//xla/service/cpu/runtime:rng_ffi",
"@local_xla//xla/service/cpu:runtime_conv2d",
"@local_xla//xla/service/cpu:runtime_custom_call_status",
"@local_xla//xla/service/cpu:runtime_key_value_sort",
Expand Down
Loading

0 comments on commit f802114

Please sign in to comment.