Move from deprecated distutils to packaging (#5687)
- converts all uses of distutils to packaging, as distutils
  is deprecated since Python 3.10 and removed in Python 3.12

Signed-off-by: Janusz Lisiecki <jlisiecki@nvidia.com>
JanuszL authored Oct 25, 2024
1 parent af14919 commit 52d314c
Showing 25 changed files with 401 additions and 271 deletions.
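
The change follows the same mechanical pattern in every touched file: `from distutils.version import LooseVersion` becomes `from packaging.version import Version`, and the comparison logic stays as it was. Below is a minimal sketch of the pattern (the `at_least` helper is illustrative only, not part of the commit); note that, unlike `LooseVersion`, packaging's `Version` enforces PEP 440, so the compared strings must be valid version identifiers.

# Before (distutils has been deprecated since Python 3.10 and removed in 3.12):
#   from distutils.version import LooseVersion
#   if LooseVersion(tf.__version__) >= LooseVersion("2.4.1"):
#       ...
# After: packaging.version.Version is a drop-in replacement for such checks.
from packaging.version import Version


def at_least(installed: str, required: str) -> bool:
    # PEP 440 ordering; raises packaging.version.InvalidVersion on malformed input.
    return Version(installed) >= Version(required)


print(at_least("2.12.0", "2.4.1"))   # True
print(at_least("0.55.2", "0.57.0"))  # False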
34 changes: 34 additions & 0 deletions Acknowledgements.txt
@@ -4410,3 +4410,37 @@ products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.

==============================================================================
str2bool


BSD 3-Clause License

Copyright (c) 2017, SymonSoft
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2 changes: 2 additions & 0 deletions conda/dali_python_bindings/recipe/meta.yaml
@@ -81,6 +81,7 @@ requirements:
- astunparse >=1.6.0
- gast >=0.3.3
- dm-tree >=0.1.8
- packaging
- nvidia-dali-core{% if environ.get('NVIDIA_DALI_BUILD_FLAVOR', '')|length %}{{"-" + environ.get('NVIDIA_DALI_BUILD_FLAVOR', '')}}{% endif %}-cuda{{ environ.get('CUDA_VERSION', '') | replace(".","") }} ={{ environ.get('DALI_CONDA_BUILD_VERSION', '') }}
- nvidia-nvimagecodec-cuda{{ environ.get('CUDA_VERSION', '') | replace(".","") }}
run:
@@ -94,6 +95,7 @@ requirements:
- astunparse >=1.6.0
- gast >=0.3.3
- dm-tree >=0.1.8
- packaging
- nvidia-dali-core{% if environ.get('NVIDIA_DALI_BUILD_FLAVOR', '')|length %}{{"-" + environ.get('NVIDIA_DALI_BUILD_FLAVOR', '')}}{% endif %}-cuda{{ environ.get('CUDA_VERSION', '') | replace(".","") }} ={{ environ.get('DALI_CONDA_BUILD_VERSION', '') }}
- nvidia-nvimagecodec-cuda{{ environ.get('CUDA_VERSION', '') | replace(".","") }}
about:
14 changes: 12 additions & 2 deletions dali/python/nvidia/dali/_autograph/pyct/gast_util.py
@@ -18,9 +18,19 @@
import functools
import gast

from distutils.version import LooseVersion
from packaging.version import Version


def convert_to_version(function):
"""Makes sure that returned function value is a Version object"""

def wrap_function(*args, **kwargs):
return Version(function(*args, **kwargs))

return wrap_function


@convert_to_version
def get_gast_version():
"""Gast exports `__version__` from 0.5.3 onwards, we need to look it up in a different way."""
if hasattr(gast, "__version__"):
@@ -76,7 +86,7 @@ def _compat_assign_gast_5(targets, value, type_comment):
return gast.Assign(targets=targets, value=value, type_comment=type_comment)


if get_gast_version() < LooseVersion("0.5"):
if get_gast_version() < Version("0.5"):
compat_assign = _compat_assign_gast_4
else:
compat_assign = _compat_assign_gast_5
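
For reference, a hedged usage sketch of the `convert_to_version` decorator introduced above: `get_gast_version()` may obtain the version as a plain string (from `gast.__version__` or a fallback lookup), and the decorator guarantees callers a comparable `Version`, which is what the `< Version("0.5")` check relies on. The `fake_gast_version` function below is made up for illustration.

from packaging.version import Version


def convert_to_version(function):
    # Parse the wrapped function's return value into a packaging Version.
    def wrap_function(*args, **kwargs):
        return Version(function(*args, **kwargs))

    return wrap_function


@convert_to_version
def fake_gast_version():
    return "0.5.3"  # illustrative value


print(fake_gast_version() >= Version("0.5"))  # True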
4 changes: 2 additions & 2 deletions dali/python/nvidia/dali/plugin/jax/__init__.py
@@ -16,15 +16,15 @@

from . import fn # noqa: F401

from distutils.version import LooseVersion
from packaging.version import Version
from .iterator import DALIGenericIterator, data_iterator

assert (
sys.version_info.major == 3 and sys.version_info.minor >= 8
), "DALI JAX support requires Python 3.8 or above"


assert LooseVersion(jax.__version__) >= LooseVersion(
assert Version(jax.__version__) >= Version(
"0.4.11"
), "DALI JAX support requires JAX 0.4.11 or above"

4 changes: 2 additions & 2 deletions dali/python/nvidia/dali/plugin/jax/fn/_jax_function_impl.py
@@ -14,7 +14,7 @@

from typing import Optional, Protocol, Tuple, Union

from distutils.version import LooseVersion
from packaging.version import Version

import jax
import jax.dlpack
@@ -170,7 +170,7 @@ def flip_horizontal(image: jax.Array):
The transformed function that processes DALI-traced batches (DataNodes).
"""

if LooseVersion(jax.__version__) < LooseVersion("0.4.16"):
if Version(jax.__version__) < Version("0.4.16"):
raise RuntimeError("DALI `jax_function` requires JAX 0.4.16 or above.")

def decorator(function):
14 changes: 7 additions & 7 deletions dali/python/nvidia/dali/plugin/numba/experimental/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from distutils.version import LooseVersion
from packaging.version import Version

from nvidia.dali.pipeline import Pipeline
from nvidia.dali.data_node import DataNode as _DataNode
@@ -57,8 +57,8 @@

# Minimal version of Numba that is required for Numba GPU operator to work
minimal_numba_version = {
11: LooseVersion("0.55.2"),
12: LooseVersion("0.57.0"),
11: Version("0.55.2"),
12: Version("0.57.0"),
}


@@ -196,7 +196,7 @@ def _get_run_fn_gpu(self, run_fn, types, dims):
for dali_type, ndim in zip(types, dims):
cuda_arguments.append(numba_types.Array(_to_numba[dali_type], ndim, "C"))

if LooseVersion(nb.__version__) < LooseVersion("0.57.0"):
if Version(nb.__version__) < Version("0.57.0"):
cres = cuda.compiler.compile_cuda(run_fn, numba_types.void, cuda_arguments)
else:
pipeline = Pipeline.current()
@@ -210,7 +210,7 @@ def _get_run_fn_gpu(self, run_fn, types, dims):
code = run_fn.__code__
filename = code.co_filename
linenum = code.co_firstlineno
if LooseVersion(nb.__version__) < LooseVersion("0.57.0"):
if Version(nb.__version__) < Version("0.57.0"):
nvvm_options["debug"] = False
nvvm_options["lineinfo"] = False
lib, _ = tgt_ctx.prepare_cuda_kernel(
@@ -509,7 +509,7 @@ def __init__(

@staticmethod
def _check_minimal_numba_version(throw: bool = True):
current_version = LooseVersion(nb.__version__)
current_version = Version(nb.__version__)
toolkit_version = cuda.runtime.get_version()
if toolkit_version[0] not in minimal_numba_version:
if throw:
@@ -522,7 +522,7 @@ def _check_minimal_numba_version(throw: bool = True):
raise RuntimeError(
f"Insufficient Numba version. Numba GPU operator "
f"requires Numba {str(min_ver)} or higher. "
f"Detected version: {str(LooseVersion(nb.__version__))}."
f"Detected version: {str(Version(nb.__version__))}."
)
else:
return False
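
The Numba plugin above keys its minimum supported Numba release on the CUDA toolkit major version reported at runtime, then compares the installed Numba against that minimum. A minimal sketch of that gate, with names simplified for illustration (the real check is `_check_minimal_numba_version` in the diff above):

from packaging.version import Version

MINIMAL_NUMBA_VERSION = {11: Version("0.55.2"), 12: Version("0.57.0")}


def numba_supported(numba_version: str, cuda_major: int) -> bool:
    # An unknown toolkit major version is treated as unsupported;
    # otherwise compare against the table entry.
    required = MINIMAL_NUMBA_VERSION.get(cuda_major)
    return required is not None and Version(numba_version) >= required


print(numba_supported("0.57.1", 12))  # True
print(numba_supported("0.56.4", 12))  # False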
8 changes: 3 additions & 5 deletions dali/python/nvidia/dali/plugin/paddle.py
@@ -18,19 +18,17 @@

import numpy as np
import paddle
from distutils.version import LooseVersion
from packaging.version import Version

from nvidia.dali import types
from nvidia.dali.backend import TensorListCPU, TensorGPU, TensorListGPU
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy

if isinstance(paddle.__version__, str):
assert LooseVersion(paddle.__version__) == LooseVersion("0.0.0") or LooseVersion(
assert Version(paddle.__version__) == Version("0.0.0") or Version(
paddle.__version__
) >= LooseVersion(
"2.0.0"
), "DALI PaddlePaddle support requires Paddle develop or release >= 2.0.0"
) >= Version("2.0.0"), "DALI PaddlePaddle support requires Paddle develop or release >= 2.0.0"


dtype_map = {
14 changes: 7 additions & 7 deletions dali/python/nvidia/dali/plugin/tf.py
@@ -26,7 +26,7 @@
from nvidia.dali._utils.external_source_impl import _get_generator_from_source_desc
from nvidia.dali._utils.external_source_impl import _cycle_enabled

from distutils.version import LooseVersion
from packaging.version import Version
import warnings

from nvidia.dali_tf_plugin import dali_tf_plugin
@@ -307,29 +307,29 @@ def DALIRawIterator():


def _get_tf_version():
return LooseVersion(tf.__version__)
return Version(tf.__version__)


MIN_TENSORFLOW_VERSION = LooseVersion("1.15")
MIN_TENSORFLOW_VERSION = Version("1.15")


def dataset_compatible_tensorflow():
"""Returns ``True`` if current TensorFlow version is compatible with DALIDataset."""
return LooseVersion(tf.__version__) >= MIN_TENSORFLOW_VERSION
return Version(tf.__version__) >= MIN_TENSORFLOW_VERSION


def dataset_inputs_compatible_tensorflow():
"""Returns ``True`` if the current TensorFlow version is compatible with
experimental.DALIDatasetWithInputs and input Datasets can be used with DALI.
"""
return LooseVersion(tf.__version__) >= LooseVersion("2.4.1")
return Version(tf.__version__) >= Version("2.4.1")


def dataset_distributed_compatible_tensorflow():
"""Returns ``True`` if the tf.distribute APIs for current TensorFlow version are compatible
with DALIDataset.
"""
return LooseVersion(tf.__version__) >= LooseVersion("2.5.0")
return Version(tf.__version__) >= Version("2.5.0")


def _get_experimental():
@@ -813,7 +813,7 @@ def _as_variant_tensor(self):
fail_on_device_mismatch=self._fail_on_device_mismatch,
)

if _get_tf_version() < LooseVersion("2.0"):
if _get_tf_version() < Version("2.0"):

class _DALIDatasetImpl(dataset_ops.DatasetV1Adapter):
@functools.wraps(_DALIDatasetV2.__init__)
1 change: 1 addition & 0 deletions dali/python/setup.py.in
@@ -87,6 +87,7 @@ For more details please check the
# 1.16 on python 3.12 due to import six.moves
'six >= 1.16',
'dm-tree',
'packaging',
@DALI_INSTALL_REQUIRES_NVIMGCODEC@
],
)
4 changes: 2 additions & 2 deletions dali/test/python/autograph/pyct/test_loader.py
@@ -21,7 +21,7 @@
import unittest

import gast
from distutils.version import LooseVersion
from packaging.version import Version

from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import gast_util
@@ -79,7 +79,7 @@ def test_load_ast(self):
decorator_list=[],
returns=None,
type_comment=None,
**{"type_params": []} if gast_util.get_gast_version() >= LooseVersion("0.5.5") else {},
**{"type_params": []} if gast_util.get_gast_version() >= Version("0.5.5") else {},
)

module, source, _ = loader.load_ast(node)
6 changes: 3 additions & 3 deletions dali/test/python/test_dali_tf_dataset_mnist.py
@@ -21,7 +21,7 @@
from shutil import rmtree as remove_directory
import tensorflow as tf
import tensorflow.compat.v1 as tf_v1
from distutils.version import StrictVersion
from packaging.version import Version
from nose import SkipTest


@@ -116,7 +116,7 @@ def run_keras_single_device(device="cpu", device_id=0):


def graph_model(images, reuse, is_training):
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
with tf_v1.variable_scope("mnist_net", reuse=reuse):
images = tf_v1.layers.flatten(images)
@@ -196,7 +196,7 @@ def _run_config(device="cpu", device_id=0):


def run_estimators_single_device(device="cpu", device_id=0):
if StrictVersion(tf.__version__) < StrictVersion("2.16"):
if Version(tf.__version__) < Version("2.16"):
with tf.device("/{0}:{1}".format(device, device_id)):
model = keras_model()
model = tf.keras.estimator.model_to_estimator(
4 changes: 2 additions & 2 deletions dali/test/python/test_dali_tf_dataset_mnist_eager.py
@@ -19,7 +19,7 @@
from test_utils_tensorflow import skip_for_incompatible_tf, available_gpus
from nose_utils import raises
from nose import SkipTest
from distutils.version import LooseVersion
from packaging.version import Version

tf.compat.v1.enable_eager_execution()

@@ -60,7 +60,7 @@ def test_keras_wrong_placement_cpu():
def test_keras_multi_gpu_mirrored_strategy():
# due to compatibility problems between the driver, cuda version and
# TensorFlow 2.12 test_keras_multi_gpu_mirrored_strategy doesn't work.
if LooseVersion(tf.__version__) >= LooseVersion("2.12.0"):
if Version(tf.__version__) >= Version("2.12.0"):
raise SkipTest("This test is not supported for TensorFlow 2.12")
strategy = tf.distribute.MirroredStrategy(devices=available_gpus())

12 changes: 6 additions & 6 deletions dali/test/python/test_dali_tf_dataset_mnist_graph.py
@@ -16,35 +16,35 @@
import tensorflow.compat.v1 as tf_v1
from nose_utils import with_setup, SkipTest, raises
import test_dali_tf_dataset_mnist as mnist
from distutils.version import StrictVersion
from packaging.version import Version

mnist.tf.compat.v1.disable_eager_execution()


@with_setup(tf.keras.backend.clear_session)
def test_keras_single_gpu():
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
mnist.run_keras_single_device("gpu", 0)


@with_setup(tf.keras.backend.clear_session)
def test_keras_single_other_gpu():
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
mnist.run_keras_single_device("gpu", 1)


@with_setup(tf.keras.backend.clear_session)
def test_keras_single_cpu():
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
mnist.run_keras_single_device("cpu", 0)


@raises(tf.errors.OpError, "TF device and DALI device mismatch. TF*: CPU, DALI*: GPU for output")
def test_keras_wrong_placement_gpu():
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
with tf.device("cpu:0"):
model = mnist.keras_model()
@@ -55,7 +55,7 @@ def test_keras_wrong_placement_gpu():

@raises(tf.errors.OpError, "TF device and DALI device mismatch. TF*: GPU, DALI*: CPU for output")
def test_keras_wrong_placement_cpu():
if StrictVersion(tf.__version__) >= StrictVersion("2.16"):
if Version(tf.__version__) >= Version("2.16"):
raise SkipTest("TF < 2.16 is required for this test")
with tf.device("gpu:0"):
model = mnist.keras_model()
6 changes: 3 additions & 3 deletions dali/test/python/test_utils.py
@@ -26,7 +26,7 @@
import subprocess
import sys
import tempfile
from distutils.version import LooseVersion
from packaging.version import Version
from nose_utils import SkipTest


@@ -945,8 +945,8 @@ def check_numba_compatibility_cpu(if_skip=True):
# Numba bug:
# https://github.com/numba/numba/issues/8567
if platform.processor().lower() in ("arm64", "aarch64", "armv8") and (
LooseVersion(numba.__version__) >= LooseVersion("0.57.0")
and LooseVersion(numba.__version__) < LooseVersion("0.59.0")
Version(numba.__version__) >= Version("0.57.0")
and Version(numba.__version__) < Version("0.59.0")
):
if if_skip:
raise SkipTest()