update_docstring_with_resolver_keys

update_docstring_with_resolver_keys(*resolver_keys: ResolverKey) → Callable[[F], F]

Build a decorator to add information about resolved parameter pairs.

The decorator is intended for functions and methods that follow the param + param_kwargs pattern and internally use a class resolver.

from typing import Any
from torch import Tensor, nn
from class_resolver import update_docstring_with_resolver_keys, ResolverKey
from class_resolver.contrib.torch import activation_resolver

@update_docstring_with_resolver_keys(
    ResolverKey("activation", "class_resolver.contrib.torch.activation_resolver")
)
def f(
    tensor: Tensor,
    activation: None | str | type[nn.Module] | nn.Module,
    activation_kwargs: dict[str, Any] | None,
):
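    """Apply an activation to a tensor.

    :param tensor: The input tensor.
    :param activation: An activation module, class, or lookup name, or None.
    :param activation_kwargs: Keyword arguments used when instantiating the activation.
    :return: The activated tensor.
    """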
    _activation = activation_resolver.make(activation, activation_kwargs)
    return _activation(tensor)
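After decoration, the added information is part of the function's docstring. A minimal way to inspect it (the exact wording of the inserted cross-reference may vary between class_resolver versions):

# Print the updated docstring; the entry for the "activation" parameter now
# references the resolver that handles it.
print(f.__doc__)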

This can also be used with multiple resolvers by passing several keys:

from typing import Any
from torch import Tensor, nn
from class_resolver import ResolverKey, update_docstring_with_resolver_keys
from class_resolver.contrib.torch import activation_resolver, aggregation_resolver

@update_docstring_with_resolver_keys(
    ResolverKey("activation", "class_resolver.contrib.torch.activation_resolver"),
    ResolverKey("aggregation", "class_resolver.contrib.torch.aggregation_resolver"),
)
def f(
    tensor: Tensor,
    activation: None | str | type[nn.Module] | nn.Module,
    activation_kwargs: dict[str, Any] | None,
    aggregation: None | str | type[nn.Module] | nn.Module,
    aggregation_kwargs: dict[str, Any] | None,
):
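    """Apply an activation and then an aggregation to a tensor.

    :param tensor: The input tensor.
    :param activation: An activation module, class, or lookup name, or None.
    :param activation_kwargs: Keyword arguments used when instantiating the activation.
    :param aggregation: An aggregation module, class, or lookup name, or None.
    :param aggregation_kwargs: Keyword arguments used when instantiating the aggregation.
    :return: The aggregated, activated tensor.
    """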
    _activation = activation_resolver.make(activation, activation_kwargs)
    _aggregation = aggregation_resolver.make(aggregation, aggregation_kwargs)
    return _aggregation(_activation(tensor))

It might be the case that you have two different parameters that use the same resolver. No problem:

from typing import Any
from torch import Tensor, nn
from class_resolver import ResolverKey, update_docstring_with_resolver_keys
from class_resolver.contrib.torch import activation_resolver, aggregation_resolver

@update_docstring_with_resolver_keys(
    ResolverKey("activation_1", "class_resolver.contrib.torch.activation_resolver"),
    ResolverKey("activation_2", "class_resolver.contrib.torch.activation_resolver"),
    ResolverKey("aggregation", "class_resolver.contrib.torch.aggregation_resolver"),
)
def f(
    tensor: Tensor,
    activation_1: None | str | type[nn.Module] | nn.Module,
    activation_1_kwargs: dict[str, Any] | None,
    aggregation: None | str | type[nn.Module] | nn.Module,
    aggregation_kwargs: dict[str, Any] | None,
    activation_2: None | str | type[nn.Module] | nn.Module,
    activation_2_kwargs: dict[str, Any] | None,
):
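    """Apply two activations around an aggregation.

    :param tensor: The input tensor.
    :param activation_1: The first activation module, class, or lookup name, or None.
    :param activation_1_kwargs: Keyword arguments for the first activation.
    :param aggregation: An aggregation module, class, or lookup name, or None.
    :param aggregation_kwargs: Keyword arguments for the aggregation.
    :param activation_2: The second activation module, class, or lookup name, or None.
    :param activation_2_kwargs: Keyword arguments for the second activation.
    :return: The transformed tensor.
    """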
    _activation_1 = activation_resolver.make(activation_1, activation_1_kwargs)
    _activation_2 = activation_resolver.make(activation_2, activation_2_kwargs)
    _aggregation = aggregation_resolver.make(aggregation, aggregation_kwargs)
    return _activation_2(_aggregation(_activation_1(tensor)))

Parameters:

resolver_keys

A variadic list of keys, each describing:

  1. the name of the parameter

  2. the resolver used to construct a reference via the :data: role.

  3. the name of the parameter for giving keyword arguments. By default, this is constructed by taking the name and appending _kwargs.

Returns:

a decorator which extends a function’s docstring.

Raises:

ValueError – When no parameter name was provided, or when a duplicate parameter name was given.
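For illustration, a hedged sketch of the duplicate-name condition; the function g below is hypothetical, and whether the error surfaces when the keys are collected or when the decorator is applied may depend on the library version:

from class_resolver import ResolverKey, update_docstring_with_resolver_keys

try:
    # Two keys share the parameter name "activation", matching the
    # duplicate-parameter-name condition described above.
    @update_docstring_with_resolver_keys(
        ResolverKey("activation", "class_resolver.contrib.torch.activation_resolver"),
        ResolverKey("activation", "class_resolver.contrib.torch.activation_resolver"),
    )
    def g(activation=None, activation_kwargs=None):
        """Resolve an activation (placeholder body)."""
except ValueError as error:
    print(error)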