Source code for pykeen.models.mocks

# -*- coding: utf-8 -*-

"""Mock models that return fixed scores.

These are useful for baselines.
"""

from typing import Any, ClassVar, Mapping

import torch

from .base import Model
from ..triples import CoreTriplesFactory

__all__ = [
    "FixedModel",
]


class FixedModel(Model):
    r"""A mock model returning fixed scores.

    .. math ::
        score(h, r, t) = h \cdot |\mathcal{E}| \cdot |\mathcal{R}| + r \cdot |\mathcal{E}| + t

    ---
    name: Fixed Model
    citation:
        author: Berrendorf
        year: 2021
        link: https://github.com/pykeen/pykeen/pull/691
        github: pykeen/pykeen
    """

    hpo_default: ClassVar[Mapping[str, Any]] = {}

    def __init__(self, *, triples_factory: CoreTriplesFactory, **_kwargs):
        super().__init__(
            triples_factory=triples_factory,
        )
        self.num_entities = triples_factory.num_entities
        self.num_relations = triples_factory.num_relations
        # This empty 1-element tensor doesn't actually do anything,
        # but is necessary since models with no grad params blow
        # up the optimizer
        self.dummy = torch.nn.Parameter(torch.empty(1), requires_grad=True)

    def collect_regularization_term(self):  # noqa: D102
        return 0.0

    def _reset_parameters_(self):  # noqa: D102
        pass  # Not needed for mock model

    def _generate_fake_scores(
        self,
        h: torch.FloatTensor,
        r: torch.FloatTensor,
        t: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Generate fake scores."""
        return (
            h * (self.num_entities * self.num_relations) + r * self.num_entities + t
        ).float().requires_grad_(True)

    def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(*hrt_batch.t()).unsqueeze(dim=-1)

    def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(
            h=hr_batch[:, 0:1],
            r=hr_batch[:, 1:2],
            t=torch.arange(self.num_entities).unsqueeze(dim=0),
        )

    def score_r(self, ht_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(
            h=ht_batch[:, 0:1],
            r=torch.arange(self.num_relations).unsqueeze(dim=0),
            t=ht_batch[:, 1:2],
        )

    def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(
            h=torch.arange(self.num_entities).unsqueeze(dim=0),
            r=rt_batch[:, 0:1],
            t=rt_batch[:, 1:2],
        )
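A minimal usage sketch (not part of the module itself): it assumes the Nations dataset bundled with PyKEEN and the ``mapped_triples`` LongTensor of (head, relation, tail) IDs exposed by its triples factory; FixedModel then acts as a deterministic baseline scorer:

    from pykeen.datasets import Nations
    from pykeen.models.mocks import FixedModel

    # Build the mock model from any triples factory.
    training = Nations().training
    model = FixedModel(triples_factory=training)

    # Every triple receives the fixed score h * |E| * |R| + r * |E| + t,
    # so repeated runs yield identical baseline results.
    scores = model.score_hrt(training.mapped_triples[:5])  # shape: (5, 1)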