Source code for gpytorch.kernels.rbf_kernel

#!/usr/bin/env python3

from ..functions import RBFCovariance
from ..settings import trace_mode
from .kernel import Kernel


def postprocess_rbf(dist_mat):
    # Turn a matrix of squared distances (between lengthscale-scaled inputs)
    # into RBF covariances, exp(-d^2 / 2). Operates in-place on `dist_mat`.
    return dist_mat.div_(-2).exp_()
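
# A quick illustration (not part of the library source) of what
# `postprocess_rbf` computes: squared distances of 0 and 4 map to
# exp(0) = 1 and exp(-2) ~= 0.1353, respectively.
#
#   >>> import torch
#   >>> sq_dist = torch.tensor([[0.0, 4.0], [4.0, 0.0]])
#   >>> postprocess_rbf(sq_dist)
#   tensor([[1.0000, 0.1353],
#           [0.1353, 1.0000]])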


class RBFKernel(Kernel):
    r"""
    Computes a covariance matrix based on the RBF (squared exponential) kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:

    .. math::

        \begin{equation*}
            k_{\text{RBF}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left( -\frac{1}{2}
            (\mathbf{x_1} - \mathbf{x_2})^\top \Theta^{-2} (\mathbf{x_1} - \mathbf{x_2}) \right)
        \end{equation*}

    where :math:`\Theta` is a lengthscale parameter.
    See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.

    .. note::

        This kernel does not have an `outputscale` parameter. To add a scaling parameter,
        decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.

    Args:
        ard_num_dims (int, optional):
            Set this if you want a separate lengthscale for each input dimension.
            It should be `d` if x1 is a `n x d` matrix. Default: `None`.
        batch_shape (torch.Size, optional):
            Set this if you want a separate lengthscale for each batch of input data.
            It should be `b` if x1 is a `b x n x d` tensor. Default: `torch.Size([])`.
        active_dims (tuple of ints, optional):
            Set this if you want to compute the covariance of only a few input dimensions.
            The ints correspond to the indices of the dimensions. Default: `None`.
        lengthscale_prior (Prior, optional):
            Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
        lengthscale_constraint (Constraint, optional):
            Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.
        eps (float):
            The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.

    Attributes:
        lengthscale (Tensor):
            The lengthscale parameter. The size/shape of the parameter depends on the
            ard_num_dims and batch_shape arguments.

    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        >>> # Non-batch: ARD (different lengthscale for each input dimension)
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=5))
        >>> covar = covar_module(x)  # Output: LazyTensor of size (10 x 10)
        >>>
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(batch_shape=torch.Size([2])))
        >>> covar = covar_module(batch_x)  # Output: LazyTensor of size (2 x 10 x 10)
    """

    has_lengthscale = True

    def forward(self, x1, x2, diag=False, **params):
        if (
            x1.requires_grad
            or x2.requires_grad
            or (self.ard_num_dims is not None and self.ard_num_dims > 1)
            or diag
            or params.get("last_dim_is_batch", False)
            or trace_mode.on()
        ):
            # Generic path: scale the inputs by the lengthscale, then let
            # covar_dist compute squared distances and apply postprocess_rbf.
            x1_ = x1.div(self.lengthscale)
            x2_ = x2.div(self.lengthscale)
            return self.covar_dist(
                x1_, x2_, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True, **params
            )
        # Fast path: a custom autograd Function with a hand-written backward pass,
        # used when autograd through the generic path is not required.
        return RBFCovariance.apply(
            x1,
            x2,
            self.lengthscale,
            lambda x1, x2: self.covar_dist(
                x1, x2, square_dist=True, diag=False, dist_postprocess_func=postprocess_rbf, postprocess=False, **params
            ),
        )
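
# --- Usage sketch (not part of the library source) -------------------------
# A minimal check, assuming a gpytorch version from the LazyTensor era (as
# documented above) where a kernel's output is densified via `.evaluate()`:
# the kernel matrix should match the closed-form
# exp(-1/2 * ||x1/theta - x2/theta||^2).
#
#   >>> import torch
#   >>> import gpytorch
#   >>> kernel = gpytorch.kernels.RBFKernel()
#   >>> x = torch.randn(4, 3)
#   >>> K = kernel(x).evaluate()  # dense 4 x 4 covariance matrix
#   >>> x_ = x / kernel.lengthscale
#   >>> sq_dist = torch.cdist(x_, x_).pow(2)
#   >>> torch.allclose(K, torch.exp(-0.5 * sq_dist), atol=1e-4)
#   True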