Source code for pykeen.nn.init

# -*- coding: utf-8 -*-

"""Embedding weight initialization routines."""

import math

import numpy as np
import torch.nn
import torch.nn.init

__all__ = [
    'xavier_uniform_',
    'xavier_normal_',
    'init_phases',
]


def xavier_uniform_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
    r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.

    Proceed as if it was a linear layer with fan_in of zero and Xavier uniform
    initialization is used, i.e. fill the input `tensor` with values sampled from
    :math:`\mathcal{U}(-a, a)` where

    .. math::

        a = \text{gain} \times \sqrt{\frac{6}{\text{embedding_dim}}}

    :param tensor: A tensor
    :param gain: An optional scaling factor, defaults to 1.0.
    :return: Embedding with weights initialized by the Xavier uniform initializer.
    """
    bound = gain * 6 / math.sqrt(tensor.shape[-1])
    torch.nn.init.uniform_(tensor, -bound, bound)
    return tensor
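
# Usage sketch (editor's illustration, not part of the pykeen module): the
# initializer fills the given weight tensor in-place and also returns it.
# The shape (3, 50) below is an arbitrary assumption.
#
#     >>> w = torch.empty(3, 50)            # 3 rows with embedding_dim = 50
#     >>> w = xavier_uniform_(w, gain=1.0)  # filled in-place, same object returned
#     >>> w.shape
#     torch.Size([3, 50])
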
def xavier_normal_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
    r"""Initialize weights of the tensor similarly to Glorot/Xavier initialization.

    Proceed as if it was a linear layer with fan_in of zero and Xavier normal
    initialization is used, i.e. fill the input `tensor` with values sampled from
    :math:`\mathcal{N}(0, a^2)` where

    .. math::

        a = \text{gain} \times \sqrt{\frac{2}{\text{embedding_dim}}}

    :param tensor: A tensor
    :param gain: An optional scaling factor, defaults to 1.0.
    :return: Embedding with weights initialized by the Xavier normal initializer.
    """
    std = gain * 2 / math.sqrt(tensor.shape[-1])
    torch.nn.init.normal_(tensor, mean=0.0, std=std)
    return tensor
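
# Usage sketch (editor's illustration, not part of the pykeen module):
# analogous to the uniform variant above, but entries are drawn from a
# zero-mean normal distribution with the standard deviation ``std``.
#
#     >>> w = torch.empty(3, 50)
#     >>> w = xavier_normal_(w, gain=1.0)  # filled in-place with normal samples
#     >>> bool(torch.isfinite(w).all())
#     True
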
def init_phases(x: torch.Tensor) -> torch.Tensor:
    r"""Generate random phases between 0 and :math:`2\pi`."""
    phases = 2 * np.pi * torch.rand_like(x[..., : x.shape[-1] // 2])
    return torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1).detach()
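
# Usage sketch (editor's illustration, not part of the pykeen module): the last
# dimension of the input (assumed even here) is split in half; the first half is
# filled with cosines and the second half with sines of the same uniformly drawn
# phases, so corresponding pairs lie on the unit circle.
#
#     >>> x = torch.empty(3, 10)
#     >>> p = init_phases(x)
#     >>> p.shape
#     torch.Size([3, 10])
#     >>> bool(((p[..., :5] ** 2 + p[..., 5:] ** 2 - 1.0).abs() < 1e-6).all())
#     True
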