"""Python implementation of ``__torch_function__``While most of the torch API and handling for ``__torch_function__`` happensat the C++ level, some of the torch API is written in Python so we needpython-level handling for ``__torch_function__`` overrides as well. The maindeveloper-facing functionality in this file are handle_torch_function andhas_torch_function. See torch/functional.py and test/test_overrides.pyfor usage examples.Note----heavily inspired by NumPy's ``__array_function__`` (see:https://github.com/pytorch/pytorch/issues/24015 andhttps://www.numpy.org/neps/nep-0018-array-function-protocol.html)If changing this file in a way that can affect ``__torch_function__`` overhead,please report the benchmarks in ``benchmarks/overrides_benchmark``. See theinstructions in the ``README.md`` in that directory."""import__future__# noqa: F404importcollectionsimportfunctoolsimporttypesimportwarningsfromtypingimportDict,Set,List,Any,Callable,Iterable,Type,Tuplefromfunctoolsimportwrapsimportcontextlibimporttorchfromtorch._Cimport(_has_torch_function,_has_torch_function_unary,_has_torch_function_variadic,_add_docstr,_push_on_torch_function_stack,_pop_torch_function_stack,_get_function_stack_at,_len_torch_function_stack,_is_torch_function_mode_enabled)__all__=["get_ignored_functions","get_overridable_functions","get_testing_overrides","handle_torch_function","has_torch_function","resolve_name","is_tensor_like","is_tensor_method_or_property","wrap_torch_function","enable_reentrant_dispatch",]def_disable_user_warnings(func:Callable,regex:str='.*is deprecated, please use.*',module:str='torch')->Callable:""" Decorator that temporarily disables ``UserWarning``s for the given ``module`` if the warning message matches the given ``regex`` pattern. Arguments --------- func : function Function to disable the warnings for. regex : str A regex pattern compilable by ``re.compile``. This is used to match the ``UserWarning`` message. module : str The python module to which the filtering should be restricted. Returns ------- function The wrapped function. """@wraps(func)defwrapper(*args,**kwargs):withwarnings.catch_warnings():warnings.filterwarnings("ignore",category=UserWarning,message=regex,module=module)returnfunc(*args,**kwargs)returnwrapper

@functools.lru_cache(None)
@_disable_user_warnings
def get_ignored_functions() -> Set[Callable]:
    """
    Return public functions that cannot be overridden by ``__torch_function__``.

    Returns
    -------
    Set[Callable]
        A set of functions that are publicly available in the torch API but cannot
        be overridden with ``__torch_function__``. Mostly this is because none of
        the arguments of these functions are tensors or tensor-likes.

    Examples
    --------
    >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
    True
    >>> torch.add in torch.overrides.get_ignored_functions()
    False
    """
    Tensor = torch.Tensor
    return {
        torch.typename, torch.is_tensor, torch.is_storage, torch.set_default_tensor_type,
        torch.set_default_device, torch.get_default_device, torch.set_rng_state, torch.get_rng_state,
        torch.manual_seed, torch.initial_seed, torch.seed, torch.save, torch.load,
        torch.set_printoptions, torch.fork, torch.get_default_dtype, torch.get_num_interop_threads,
        torch.get_num_threads, torch.init_num_threads, torch.import_ir_module,
        torch.import_ir_module_from_buffer, torch.is_anomaly_enabled,
        torch.is_anomaly_check_nan_enabled, torch.is_grad_enabled, torch.merge_type_from_type_comment,
        torch.parse_ir, torch.parse_schema, torch.parse_type_comment, torch.set_anomaly_enabled,
        torch.set_flush_denormal, torch.set_num_interop_threads, torch.set_num_threads, torch.wait,
        torch.as_tensor, torch.from_numpy, torch.get_device, torch.tensor, torch.default_generator,
        torch.has_cuda, torch.has_cudnn, torch.has_lapack, torch.device, torch.dtype, torch.finfo,
        torch.has_mkl, torch.has_mps, torch.has_mkldnn, torch.has_openmp, torch.iinfo,
        torch.memory_format, torch.qscheme, torch.set_grad_enabled, torch.no_grad, torch.enable_grad,
        torch.inference_mode, torch.is_inference_mode_enabled, torch.layout, torch.align_tensors,
        torch.arange, torch.as_strided, torch.bartlett_window, torch.blackman_window,
        torch.broadcast_shapes, torch.can_cast, torch.compile, torch.cudnn_affine_grid_generator,
        torch.cudnn_batch_norm, torch.cudnn_convolution, torch.cudnn_convolution_transpose,
        torch.cudnn_convolution_relu, torch.cudnn_convolution_add_relu, torch.cudnn_grid_sampler,
        torch.cudnn_is_acceptable, torch.empty, torch.empty_permuted, torch.empty_strided,
        torch.empty_quantized, torch.export.dynamic_dim, torch.export.export, torch.export.load,
        torch.export.register_dataclass, torch.export.save, torch.eye, torch.fft.fftfreq,
        torch.fft.rfftfreq, torch.from_file, torch.full, torch.fill, torch.hamming_window,
        torch.hann_window, torch.kaiser_window, torch.linspace, torch.logspace,
        torch.mkldnn_adaptive_avg_pool2d, torch.mkldnn_convolution, torch.mkldnn_max_pool2d,
        torch.mkldnn_max_pool3d, torch.mkldnn_linear_backward_weights, torch.mkldnn_rnn_layer,
        torch.normal, torch.ones, torch.promote_types, torch.rand, torch.randn, torch.randint,
        torch.randperm, torch.range, torch.result_type, torch.scalar_tensor, torch.sparse_coo_tensor,
        torch.sparse_compressed_tensor, torch.sparse_csr_tensor, torch.sparse_csc_tensor,
        torch.sparse_bsr_tensor, torch.sparse_bsc_tensor, torch.sym_constrain_range,
        torch.sym_constrain_range_for_size, torch.tril_indices, torch.triu_indices, torch.vander,
        torch.zeros, torch._jit_internal.boolean_dispatch,
        torch.nn.functional.assert_int_or_pair, torch.nn.functional.upsample,
        torch.nn.functional.upsample_bilinear, torch.nn.functional.upsample_nearest,
        torch.nn.functional.has_torch_function, torch.nn.functional.has_torch_function_unary,
        torch.nn.functional.has_torch_function_variadic, torch.nn.functional.handle_torch_function,
        torch.nn.functional.sigmoid, torch.nn.functional.hardsigmoid, torch.nn.functional.tanh,
        torch.nn.functional._canonical_mask,
        torch.nn.functional._none_or_dtype,  # Doesn't actually take or return tensor arguments
        torch.nn.init.calculate_gain,
        # These are deprecated; don't test them
        torch.nn.init.uniform, torch.nn.init.normal, torch.nn.init.constant, torch.nn.init.eye,
        torch.nn.init.dirac, torch.nn.init.xavier_uniform, torch.nn.init.xavier_normal,
        torch.nn.init.kaiming_uniform, torch.nn.init.kaiming_normal, torch.nn.init.orthogonal,
        torch.nn.init.sparse,
        torch.nested.to_padded_tensor,
        has_torch_function, handle_torch_function,
        torch.set_autocast_enabled, torch.is_autocast_enabled, torch.clear_autocast_cache,
        torch.set_autocast_cpu_enabled, torch.is_autocast_cpu_enabled,
        torch.set_autocast_xla_enabled, torch.is_autocast_xla_enabled,
        torch.set_autocast_ipu_enabled, torch.is_autocast_ipu_enabled,
        torch.set_autocast_cpu_dtype, torch.get_autocast_cpu_dtype,
        torch.set_autocast_ipu_dtype, torch.get_autocast_ipu_dtype,
        torch.get_autocast_gpu_dtype, torch.set_autocast_gpu_dtype,
        torch.get_autocast_xla_dtype, torch.set_autocast_xla_dtype,
        torch.autocast_increment_nesting, torch.autocast_decrement_nesting,
        torch.is_autocast_cache_enabled, torch.set_autocast_cache_enabled,
        torch.nn.functional.hardswish, torch.is_vulkan_available,
        torch.are_deterministic_algorithms_enabled, torch.use_deterministic_algorithms,
        torch.is_deterministic_algorithms_warn_only_enabled, torch.set_deterministic_debug_mode,
        torch.get_deterministic_debug_mode, torch.set_float32_matmul_precision,
        torch.get_float32_matmul_precision, torch.unify_type_list, torch.is_warn_always_enabled,
        torch.set_warn_always, torch.vitals_enabled, torch.set_vital, torch.read_vitals,
        torch.vmap, torch.cond, torch.frombuffer, torch.asarray,
        torch._functional_sym_constrain_range, torch._make_dep_token,
        Tensor.__delitem__, Tensor.__dir__, Tensor.__getattribute__, Tensor.__init__,
        Tensor.__iter__, Tensor.__init_subclass__, Tensor.__delattr__, Tensor.__setattr__,
        Tensor.__torch_function__, Tensor.__torch_dispatch__, Tensor.__new__, Tensor.__class__,
        Tensor.__subclasshook__, Tensor.__hash__, Tensor.as_subclass, Tensor.eig, Tensor.lstsq,
        Tensor.reinforce, Tensor.new, Tensor.new_tensor, Tensor.new_empty, Tensor.new_empty_strided,
        Tensor.new_zeros, Tensor.new_ones, Tensor.new_full, Tensor._make_subclass, Tensor.solve,
        Tensor.symeig, Tensor.stride, Tensor.unflatten, Tensor.to_sparse_coo, Tensor.to_sparse_csr,
        Tensor.to_sparse_csc, Tensor.to_sparse_bsr, Tensor.to_sparse_bsc, Tensor._to_sparse,
        Tensor._to_sparse_csr, Tensor._to_sparse_csc, Tensor._to_sparse_bsr, Tensor._to_sparse_bsc,
        Tensor._typed_storage, Tensor._reduce_ex_internal, Tensor._fix_weakref, Tensor._view_func,
        Tensor._view_func_unsafe, Tensor._rev_view_func_unsafe, Tensor._make_wrapper_subclass,
        Tensor._python_dispatch.__get__, Tensor._has_symbolic_sizes_strides.__get__,
        Tensor._conj, Tensor._conj_physical, Tensor._lazy_clone, Tensor._neg_view,
        Tensor._is_zerotensor, Tensor._is_all_true, Tensor._is_any_true, Tensor._addmm_activation,
        Tensor.to_padded_tensor,
    }
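
# Usage sketch (illustrative only): ``get_ignored_functions`` is typically
# consulted when auditing ``__torch_function__`` coverage, since the functions
# it returns never dispatch to an override.
#
#     >>> ignored = torch.overrides.get_ignored_functions()
#     >>> torch.zeros in ignored       # factory function: takes no tensor arguments
#     True
#     >>> torch.matmul in ignored      # ordinary tensor op: overridable
#     False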

@functools.lru_cache(None)
def get_default_nowrap_functions() -> Set[Callable]:
    """
    Return public functions that do not wrap in a subclass when invoked by
    the default ``Tensor.__torch_function__`` that preserves subclasses. Typically,
    these functions represent field accesses (i.e., retrieving a Tensor that
    is stored somewhere on the Tensor) as opposed to computation. Users of
    these functions expect object identity to be preserved over multiple accesses
    (e.g., ``a.grad is a.grad``) which cannot be upheld if we're wrapping on
    the fly every time (furthermore, the tensor stored here might already be
    the subclass, in which case wrapping really ought not to happen).

    Not ALL property accessors have this property; for example ``Tensor.T`` actually
    just creates a new transposed tensor on the fly, and so we SHOULD interpose on
    these calls (you need to check the implementation of the function to see if
    this is the case or not). Additionally, if a property accessor doesn't return a Tensor,
    it doesn't have to be on this list (though it is harmless if it is).
    """
    Tensor = torch.Tensor
    return {
        Tensor._base.__get__,
        Tensor.grad.__get__,
        Tensor._grad.__get__,
    }
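
# Illustrative sketch: why field accessors like ``Tensor.grad.__get__`` must
# not re-wrap their results. ``_SubTensor`` is a hypothetical subclass that
# relies on the default, subclass-preserving ``Tensor.__torch_function__``.
#
#     >>> class _SubTensor(torch.Tensor):
#     ...     pass
#     >>> x = torch.ones(3).as_subclass(_SubTensor).requires_grad_()
#     >>> type(x + x) is _SubTensor    # computation: result is re-wrapped
#     True
#     >>> x.sum().backward()
#     >>> x.grad is x.grad             # field access: identity preserved, no re-wrap
#     True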

@functools.lru_cache(None)
@_disable_user_warnings
def get_testing_overrides() -> Dict[Callable, Callable]:
    """Return a dict containing dummy overrides for all overridable functions

    Returns
    -------
    Dict[Callable, Callable]
        A dictionary that maps overridable functions in the PyTorch API to
        lambda functions that have the same signature as the real function
        and unconditionally return -1. These lambda functions are useful
        for testing API coverage for a type that defines ``__torch_function__``.

    Examples
    --------
    >>> import inspect
    >>> my_add = torch.overrides.get_testing_overrides()[torch.add]
    >>> inspect.signature(my_add)
    <Signature (input, other, out=None)>
    """
    # Every function in the PyTorch API that can be overridden needs an entry
    # in this dict.
    #
    # Optimally we would use inspect to get the function signature and define
    # the lambda function procedurally but that is blocked by generating
    # function signatures for native kernels that can be consumed by inspect.
    # See Issue #28233.
    Tensor = torch.Tensor
    ret: Dict[Callable, Callable] = {
        torch.abs: lambda input, out=None: -1,
        torch.absolute: lambda input, out=None: -1,
        torch.adaptive_avg_pool1d: lambda input, output_size: -1,
        torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
        torch.acos: lambda input, out=None: -1,
        torch.adjoint: lambda input: -1,
        torch.arccos: lambda input, out=None: -1,
        torch.acosh: lambda input, out=None: -1,
        torch.arccosh: lambda input, out=None: -1,
        torch.add: lambda input, other, out=None: -1,
        torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
        torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
        torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
        torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
        torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
        torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
        torch.affine_grid_generator: lambda theta, size, align_corners: -1,
        torch.all: lambda input, dim=None: -1,
        torch.allclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
        torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
        torch.amax: lambda input, dim=None: -1,
        torch.amin: lambda input, dim=None: -1,
        torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.angle: lambda input, out=None: -1,
        torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.argmax: lambda input: -1,
        torch.argmin: lambda input: -1,
        torch.argsort: lambda input, dim=None: -1,
        torch.asin: lambda input, out=None: -1,
        torch._assert_async: lambda input, msg: -1,
        torch.arcsin: lambda input, out=None: -1,
        torch.asinh: lambda input, out=None: -1,
        torch.arcsinh: lambda input, out=None: -1,
        torch.atan: lambda input, out=None: -1,
        torch.arctan: lambda input, out=None: -1,
        torch.atan2: lambda input, other, out=None: -1,
        torch.arctan2: lambda input, other, out=None: -1,
        torch.atanh: lambda input, out=None: -1,
        torch.arctanh: lambda input, out=None: -1,
        torch.atleast_1d: lambda *tensors: -1,
        torch.atleast_2d: lambda *tensors: -1,
        torch.atleast_3d: lambda *tensors: -1,
        torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
        torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
        torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
        torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
        torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
        torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
        torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
        torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
        torch.batch_norm_stats: lambda input, eps: -1,
        torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
        torch.bernoulli: lambda input, generator=None, out=None: -1,
        torch.bilinear: lambda input1, input2, weight, bias: -1,
        torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
                                                 reduction='mean', pos_weight=None: -1),
        torch.bincount: lambda input, weights=None, minlength=0: -1,
        torch.binomial: lambda count, prob, generator=None: -1,
        torch.bitwise_and: lambda input, other, out=None: -1,
        torch.bitwise_not: lambda input, out=None: -1,
        torch.bitwise_or: lambda input, other, out=None: -1,
        torch.bitwise_xor: lambda input, other, out=None: -1,
        torch.bitwise_left_shift: lambda input, other, out=None: -1,
        torch.bitwise_right_shift: lambda input, other, out=None: -1,
        torch.block_diag: lambda *tensors: -1,
        torch.bmm: lambda input, mat2, out=None: -1,
        torch.broadcast_tensors: lambda *tensors: -1,
        torch.broadcast_to: lambda self, size: -1,
        torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
        torch.cartesian_prod: lambda *tensors: -1,
        torch.cat: lambda tensors, dim=0, out=None: -1,
        torch.concat: lambda tensors, dim=0, out=None: -1,  # alias for torch.cat
        torch.concatenate: lambda tensors, dim=0, out=None: -1,  # alias for torch.cat
        torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
        torch.ceil: lambda input, out=None: -1,
        torch.celu: lambda input, alpha=1., inplace=False: -1,
        torch.chain_matmul: lambda *matrices, out=None: -1,
        torch.channel_shuffle: lambda input, groups: -1,
        torch.cholesky: lambda input, upper=False, out=None: -1,
        torch.linalg.cholesky: lambda input, out=None: -1,
        torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
        torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
        torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
        torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
        torch.chunk: lambda input, chunks, dim=0: -1,
        torch.clamp: lambda input, min=None, max=None, out=None: -1,
        torch.clip: lambda input, min=None, max=None, out=None: -1,
        torch.clamp_min: lambda input, min, out=None: -1,
        torch.clamp_max: lambda input, max, out=None: -1,
        torch.column_stack: lambda tensors, out=None: -1,
        torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1,
        torch.clone: lambda input: -1,
        torch.combinations: lambda input, r=2, with_replacement=False: -1,
        torch.complex: lambda real, imag: -1,
        torch.copysign: lambda input, other, out=None: -1,
        torch.polar: lambda abs, ang: -1,
        torch.linalg.cond: lambda input, ord=None: -1,
        torch.conj: lambda input, out=None: -1,
        torch.conj_physical: lambda input, out=None: -1,
        torch.resolve_conj: lambda input, out=None: -1,
        torch.resolve_neg: lambda input, out=None: -1,
        torch.constant_pad_nd: lambda input, pad, value=0: -1,
        torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_padding, groups: -1,
        torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
        torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
        torch.corrcoef: lambda input: -1,
        torch.cos: lambda input, out=None: -1,
        torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
        torch.cosh: lambda input, out=None: -1,
        torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
        torch.count_nonzero: lambda input: -1,
        torch.cross: lambda input, other, dim=None, out=None: -1,
        torch.linalg.cross: lambda input, other, dim=-1, out=None: -1,
        torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
                         zero_infinity=False: -1),
        torch.cummax: lambda input, dim, out=None: -1,
        torch.cummin: lambda input, dim, out=None: -1,
        torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
        torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
        torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1,
        torch.logcumsumexp: lambda input, dim, out=None: -1,
        torch.deg2rad: lambda input, out=None: -1,
        torch.dequantize: lambda input: -1,
        torch.det: lambda input: -1,
        torch.linalg.det: lambda input: -1,  # alias for torch.det  # type: ignore[attr-defined]
        torch.detach: lambda input: -1,
        torch.diag: lambda input, diagonal=0, out=None: -1,
        torch.diag_embed: lambda input, diagonal=0, out=None: -1,
        torch.diagflat: lambda input, offset=0: -1,
        torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
        torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
        torch.linalg.diagonal: lambda input, offset=0, dim1=-2, dim2=-1: -1,
        torch.diagonal_scatter: lambda input, src, offset=0, dim1=0, dim2=1: -1,
        torch.as_strided_scatter: lambda self, src, size, stride, storage_offset=None: -1,
        torch.digamma: lambda input, out=None: -1,
        torch.dist: lambda input, other, p=2: -1,
        torch.div: lambda input, other, rounding_mode=None, out=None: -1,
        torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
        torch.dot: lambda input, other, out=None: -1,
        torch.dropout: lambda input, p, train, inplace=False: -1,
        torch.dsmm: lambda input, mat2: -1,
        torch.hsmm: lambda mat1, mat2: -1,
        torch.dsplit: lambda input, indices_or_sections: -1,
        torch.dstack: lambda tensors, out=None: -1,
        torch.linalg.eig: lambda input, out=None: -1,
        torch.linalg.eigvals: lambda input, out=None: -1,
        torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
        torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
        torch.einsum: lambda equation, *operands: -1,
        torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
                          scale_grad_by_freq=False, sparse=False: -1),
        torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
                              mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
        torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.eq: lambda input, other, out=None: -1,
        torch.equal: lambda input, other: -1,
        torch.erf: lambda input, out=None: -1,
        torch.erfc: lambda input, out=None: -1,
        torch.erfinv: lambda input, out=None: -1,
        torch.exp: lambda input, out=None: -1,
        torch.exp2: lambda input, out=None: -1,
        torch.expm1: lambda input, out=None: -1,
        torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
        torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
        torch.fused_moving_avg_obs_fake_quant: (lambda x, observer_on, fake_quant_on, averaging_const, running_min,
                                                running_max, scale, zero_point, quant_min, quant_max, ch_axis,
                                                per_row_fake_quant=False, symmetric_quant=False: -1),
        torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
        torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
        torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
        torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
                                                          weight_zero_point, bias: -1),
        torch.fbgemm_linear_quantize_weight: lambda input: -1,
        torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
        torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
        torch.feature_alpha_dropout: lambda input, p, train: -1,
        torch.feature_dropout: lambda input, p, train: -1,
        torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fft.hfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.ihfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.hfftn: lambda input, s=None, dim=-1, norm=None: -1,
        torch.fft.ihfftn: lambda input, s=None, dim=-1, norm=None: -1,
        torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
        torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
        torch.fft.fftshift: lambda input, dim=None: -1,
        torch.fft.ifftshift: lambda input, dim=None: -1,
        torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
        torch.fix: lambda input, out=None: -1,
        torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
        torch.flip: lambda input, dims: -1,
        torch.fliplr: lambda input: -1,
        torch.flipud: lambda input: -1,
        torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
        torch.floor: lambda input, out=None: -1,
        torch.floor_divide: lambda input, other: -1,
        torch.float_power: lambda input, exponent, out=None: -1,
        torch.fmod: lambda input, other, out=None: -1,
        torch.frac: lambda input, out=None: -1,
        torch.frexp: lambda input, out=None: -1,
        torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
        torch._functional_assert_async: lambda input, msg, dep_token: -1,
        torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
        torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
        torch.gcd: lambda input, other, out=None: -1,
        torch.ge: lambda input, other, out=None: -1,
        torch.greater_equal: lambda input, other, out=None: -1,
        torch.geqrf: lambda input, out=None: -1,
        torch.i0: lambda input, out=None: -1,
        torch.inner: lambda input, other, out=None: -1,
        torch.outer: lambda input, vec2, out=None: -1,
        torch.ger: lambda input, vec2, out=None: -1,  # alias for torch.outer
        torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
        torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
        torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
        torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
        torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.gt: lambda input, other, out=None: -1,
        torch.greater: lambda input, other, out=None: -1,
        torch.hardshrink: lambda input, lambd=0.5: -1,
        torch.heaviside: lambda input, values, out=None: -1,
        torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
        torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
        torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None: -1,
        torch.histogramdd: lambda input, bins, range=None, weight=None, density=False: -1,
        torch.linalg.householder_product: lambda input, tau: -1,
        torch.hspmm: lambda mat1, mat2, out=None: -1,
        torch.hsplit: lambda input, indices_or_sections: -1,
        torch.hstack: lambda tensors, out=None: -1,
        torch.hypot: lambda input, other, out=None: -1,
        torch.igamma: lambda input, other, out=None: -1,
        torch.igammac: lambda input, other, out=None: -1,
        torch.imag: lambda input, out=None: -1,
        torch.index_add: lambda input, dim, index, source: -1,
        torch.index_copy: lambda input, dim, index, source: -1,
        torch.index_put: lambda input, indices, values, accumulate=False: -1,
        torch.index_select: lambda input, dim, index, out=None: -1,
        torch.index_fill: lambda input, dim, index, value: -1,
        torch.index_reduce: lambda input, dim, index, source, reduce, include_self=True: -1,
        torch.isfinite: lambda tensor: -1,
        torch.isin: lambda e, te, assume_unique=False, invert=False: -1,
        torch.isinf: lambda tensor: -1,
        torch.isreal: lambda tensor: -1,
        torch.isposinf: lambda input, out=None: -1,
        torch.isneginf: lambda input, out=None: -1,
        torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
                              cudnn_enabled: -1),
        torch.int_repr: lambda input: -1,
        torch.inverse: lambda input, out=None: -1,
        torch.linalg.inv: lambda input, out=None: -1,
        torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
        torch.is_complex: lambda input: -1,
        torch.is_conj: lambda input: -1,
        torch.is_neg: lambda input: -1,
        torch.is_distributed: lambda input: -1,
        torch.is_inference: lambda input: -1,
        torch.is_floating_point: lambda input: -1,
        torch.is_nonzero: lambda input: -1,
        torch.is_same_size: lambda input, other: -1,
        torch.is_signed: lambda input: -1,
        torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
        torch.isnan: lambda input: -1,
        torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
                      normalized=False, onesided=None, length=None, return_complex=False: -1),
        torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
        torch.kron: lambda input, other: -1,
        torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
        torch.linalg.ldl_factor_ex: lambda input, hermitian=False, check_errors=False, out=None: -1,
        torch.linalg.ldl_factor: lambda input, hermitian=False, out=None: -1,
        torch.linalg.ldl_solve: lambda LD, pivots, B, hermitian=False, out=None: -1,
        torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
        torch.lcm: lambda input, other, out=None: -1,
        torch.ldexp: lambda input, other, out=None: -1,
        torch.le: lambda input, other, out=None: -1,
        torch.less_equal: lambda input, other, out=None: -1,
        torch.lerp: lambda input, end, weight, out=None: -1,
        torch.lgamma: lambda input, out=None: -1,
        torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None, tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
        torch.log: lambda input, out=None: -1,
        torch.log_softmax: lambda input, dim, dtype=None: -1,
        torch.log10: lambda input, out=None: -1,
        torch.log1p: lambda input, out=None: -1,
        torch.log2: lambda input, out=None: -1,
        torch.logaddexp: lambda input, other, out=None: -1,
        torch.logaddexp2: lambda input, other, out=None: -1,
        torch.logdet: lambda input: -1,
        torch.xlogy: lambda x, y, out=None: -1,
        torch.logical_and: lambda input, other, out=None: -1,
        torch.logical_not: lambda input, out=None: -1,
        torch.logical_or: lambda input, other, out=None: -1,
        torch.logical_xor: lambda input, other, out=None: -1,
        torch.logit: lambda input, eps=None: -1,
        torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
        torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
        torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.lt: lambda input, other, out=None: -1,
        torch.less: lambda input, other, out=None: -1,
        torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
        torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
        torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,  # type: ignore[attr-defined]  # noqa: B950
        torch.masked_fill: lambda input, mask, value: -1,
        torch.masked_scatter: lambda input, mask, source: -1,
        torch.masked_select: lambda input, mask, out=None: -1,
        torch.matmul: lambda input, other, out=None: -1,
        torch.linalg.lu: lambda input, pivot=True, out=None: -1,
        torch.linalg.lu_factor: lambda input, pivot=True, out=None: -1,
        torch.linalg.lu_factor_ex: lambda input, pivot=True, check_errors=False, out=None: -1,
        torch.linalg.lu_solve: lambda LU, pivots, B, left=True, adjoint=False, out=None: -1,
        torch.linalg.matmul: lambda input, other, out=None: -1,  # alias for torch.matmul
        torch.matrix_power: lambda input, n: -1,
        torch.linalg.matrix_power: lambda input, n, out=None: -1,
        torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
        torch.linalg.multi_dot: lambda tensors, out=None: -1,
        torch.matrix_exp: lambda input: -1,
        torch.linalg.matrix_exp: lambda input: -1,
        torch.max: lambda input, out=None: -1,
        torch.maximum: lambda input, other, out=None: -1,
        torch.fmax: lambda input, other, out=None: -1,
        torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
        torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                        return_indices=False, ceil_mode=False: -1),
        torch.mean: lambda input, dim=None: -1,
        torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1,
        torch.median: lambda input, dim=None: -1,
        torch.nanmedian: lambda input, dim=None: -1,
        torch.meshgrid: lambda *tensors, **kwargs: -1,
        torch.min: lambda input, out=None: -1,
        torch.minimum: lambda input, other, out=None: -1,
        torch.fmin: lambda input, other, out=None: -1,
        torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
                                  exponential_average_factor, epsilon: -1),
        torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
        torch.miopen_convolution_add_relu: lambda input, weight, z, alpha, bias, stride, padding, dilation, groups: -1,
        torch.miopen_convolution_relu: lambda input, weight, bias, stride, padding, dilation, groups: -1,
        torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
                                             groups, benchmark, deterministic: -1),
        torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups,
                                             benchmark, deterministic: -1),
        torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
                           dropout, train, bidirectional, batch_sizes, dropout_state: -1),
        torch.mm: lambda input, mat2, out=None: -1,
        torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
        torch.movedim: lambda input, source, destination: -1,
        torch.moveaxis: lambda input, source, destination: -1,
        torch.msort: lambda input, descending=False, out=None: -1,
        torch.mul: lambda input, other, out=None: -1,
        torch.multiply: lambda input, other, out=None: -1,
        torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
        torch.mv: lambda input, vec, out=None: -1,
        torch.mvlgamma: lambda input, p: -1,
        torch.narrow: lambda input, dim, start, length: -1,
        torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
        torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
        torch._native_batch_norm_legit: lambda input, weight, bias, training, momentum, eps: -1,
        torch.native_dropout: lambda input, p, train: -1,
        torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
        torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
        torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
        torch.native_channel_shuffle: lambda input, groups: -1,
        torch.ne: lambda input, other, out=None: -1,
        torch.not_equal: lambda input, other, out=None: -1,
        torch.neg: lambda input, out=None: -1,
        torch.negative: lambda input, out=None: -1,
        torch.nextafter: lambda input, other, out=None: -1,
        torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
        torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
        torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
        torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
        torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
        torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
                                         count_include_pad=True, divisor_override=None: -1),
        torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
                                         count_include_pad=True, divisor_override=None: -1),
        torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None,
                                         training=False, momentum=0.1, eps=1e-05: -1),
        torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
        torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
                                                   reduction="mean": -1),
        torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
                                                               reduce=None, reduction="mean", pos_weight=None: -1),
        torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
        torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
                                                    reduce=None, reduction='mean': -1),
        torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
                                            reduce=None, reduction="mean", label_smoothing=0.0: -1),
        torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
                                       reduction='mean', zero_infinity=False: -1),
        torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout1d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
        torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
        torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
                                        scale_grad_by_freq=False, sparse=False: -1),
        torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
                                            scale_grad_by_freq=False, mode='mean', sparse=False,
                                            per_sample_weights=None, include_last_offset=False, padding_idx=None: -1),
        torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
        torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
        torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
                                                    return_indices=False, _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool2d_with_indices: (lambda input, kernel_size, output_size=None,
                                                                 output_ratio=None, return_indices=False,
                                                                 _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
                                                    return_indices=False, _random_samples=None: -1),
        torch.nn.functional.fractional_max_pool3d_with_indices: (lambda input, kernel_size, output_size=None,
                                                                 output_ratio=None, return_indices=False,
                                                                 _random_samples=None: -1),
        torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
        torch.nn.functional.gelu: lambda input, approximate='none': -1,
        torch.nn.functional.glu: lambda input, dim=-1: -1,
        torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
        torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
        torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
        torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
        torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
        torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
                                                   reduction='mean': -1),
        torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
                                            use_input_stats=True, momentum=0.1, eps=1e-05: -1),
        torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest',
                                          align_corners=None, recompute_scale_factor=None, antialias=False: -1),
        torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
        torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
        torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
        torch.nn.functional.linear: lambda input, weight, bias=None: -1,
        torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
        torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.logsigmoid: lambda input: -1,
        torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
        torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
        torch.nn.functional.lp_pool3d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
        torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
                                                  reduce=None, reduction='mean': -1),
        torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                         ceil_mode=False, return_indices=False: -1),
        torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                                      return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                         ceil_mode=False, return_indices=False: -1),
        torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                                      return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                         return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
                                                      return_indices=False, ceil_mode=False: -1),
        torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
        torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.multi_head_attention_forward: (lambda query, key, value, embed_dim_to_check, num_heads,
                                                           in_proj_weight, in_proj_bias, bias_k, bias_v,
                                                           add_zero_attn, dropout_p, out_proj_weight, out_proj_bias,
                                                           training=True, key_padding_mask=None, need_weights=True,
                                                           attn_mask=None, use_separate_proj_weight=False,
                                                           q_proj_weight=None, k_proj_weight=None, v_proj_weight=None,
                                                           static_k=None, static_v=None, average_attn_weights=None,
                                                           is_causal=False: -1),
        torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
                                                reduce=None, reduction='mean': -1),
        torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
                                                     reduction='mean': -1),
        torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
                                                          reduce=None, reduction='mean': -1),
        torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
                                       reduce=None, reduction='mean': -1),
        torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
        torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
        torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
        torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
        torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
                                               eps=1e-08, reduce=None, reduction='mean': -1),
        torch.nn.functional.prelu: lambda input, weight: -1,
        torch.nn.functional.relu: lambda input, inplace=False: -1,
        torch.nn.functional.relu6: lambda input, inplace=False: -1,
        torch.nn.functional.rms_norm: lambda input, normalized_shape, weight=None, eps=1e-6: -1,
        torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
        torch.nn.functional.selu: lambda input, inplace=False: -1,
        torch.nn.functional.silu: lambda input, inplace=False: -1,
        torch.nn.functional.mish: lambda input, inplace=False: -1,
        torch.nn.functional.scaled_dot_product_attention: lambda query, key, value, attn_mask=None, dropout_p=0.0: -1,
        torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
        torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
        torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
        torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
        torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
        torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
        torch.nn.functional.softsign: lambda input: -1,
        torch.nn.functional.tanhshrink: lambda input: -1,
        torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
        torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
                                                  swap=False, size_average=None, reduce=None, reduction='mean': -1),
        torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
                                                                distance_function=None, margin=1.0, swap=False,
                                                                reduction='mean': -1),
        torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
        torch.nn.init.uniform_: lambda tensor, a=0., b=1., generator=None: -1,
        torch.nn.init.normal_: lambda tensor, mean=0., std=1., generator=None: -1,
        torch.nn.init.constant_: lambda tensor, val: -1,
        torch.nn.init.kaiming_uniform_: lambda tensor, a=0, mode='fan_in', nonlinearity='leaky_relu', generator=None: -1,
        torch.nonzero: lambda input, as_tuple=False: -1,
        torch.nonzero_static: lambda input, *, size, fill_value=-1: -1,
        torch.argwhere: lambda input: -1,
        torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
        torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
        torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
        torch.numel: lambda input: -1,
        torch.orgqr: lambda input, tau: -1,
        torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
        torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
        torch.permute: lambda self, dim: -1,
        torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
        torch.pdist: lambda input, p=2: -1,
        torch.pinverse: lambda input, rcond=1e-15: -1,
        torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
        torch.pixel_shuffle: lambda input, upscale_factor: -1,
        torch.pixel_unshuffle: lambda input, downscale_factor: -1,
        torch.poisson: lambda input, generator=None: -1,
        torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
        torch.polygamma: lambda input, n, out=None: -1,
        torch.positive: lambda input, out=None: -1,
        torch.prelu: lambda input, weight: -1,
        torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.pow: lambda input, exponent, out=None: -1,
        torch.prod: lambda input, dtype=None: -1,
        torch.put: lambda input, index, source, accumulate=False: -1,
        torch.q_per_channel_axis: lambda input: -1,
        torch.q_per_channel_scales: lambda input: -1,
        torch.q_per_channel_zero_points: lambda input: -1,
        torch.q_scale: lambda input: -1,
        torch.q_zero_point: lambda input: -1,
        torch.qr: lambda input, some=True, out=None: -1,
        torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
        torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
        torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
        torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
        torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
        torch.quantize_per_tensor_dynamic: lambda input, dtype, reduce_range: -1,
        torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
        torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
                                   col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
                                    col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
        torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,), dilation=(1,),
                                     ceil_mode=False: -1),
        torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0), dilation=(1, 1),
                                     ceil_mode=False: -1),
        torch.quantized_max_pool3d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0, 0),
                                     dilation=(1, 1, 1), ceil_mode=False: -1),
        torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh,
                                        col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih,
                                        zero_point_hh: -1),
        torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh,
                                        col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih,
                                        zero_point_hh: -1),
        torch.rad2deg: lambda input, out=None: -1,
        torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
        torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch.ravel: lambda input: -1,
        torch.real: lambda input, out=None: -1,
        torch.vdot: lambda input, other, out=None: -1,
        torch.linalg.vecdot: lambda input, other, dim=-1, out=None: -1,
        torch.view_as_real: lambda input: -1,
        torch.view_as_complex: lambda input: -1,
        torch.reciprocal: lambda input, out=None: -1,
        torch.relu: lambda input, inplace=False: -1,
        torch.remainder: lambda input, other, out=None: -1,
        torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
        torch.repeat_interleave: lambda input, dim=None: -1,
        torch.reshape: lambda input, shape: -1,
        torch.rms_norm: lambda input, normalized_shape, weight=None, eps=1e-6: -1,
        torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
        torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
        torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
        torch.roll: lambda input, shifts, dims=None: -1,
        torch.rot90: lambda input, k=1, dims=(0, 1): -1,
        torch.round: lambda input, out=None: -1,
        torch.row_stack: lambda tensors, out=None: -1,  # alias for torch.vstack
        torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
        torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
        torch.rsqrt: lambda input, out=None: -1,
        torch.rsub: lambda input, other, alpha=1: -1,
        torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
        torch.scatter: lambda input, dim, index, src: -1,
        torch.scatter_add: lambda input, dim, index, src: -1,
        torch.scatter_reduce: lambda input, dim, index, src, reduce, include_self=True: -1,
        torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
        torch._segment_reduce: lambda data, reduce="max", lengths=None, indices=None, offsets=None, axis=0, unsafe=False: -1,
        torch.select: lambda input, dim, index: -1,
        torch.select_scatter: lambda input, src, dim, index: -1,
        torch.slice_inverse: lambda input, src, dim=0, start=None, end=None, step=1: -1,
        torch.slice_scatter: lambda input, src, dim=0, start=None, end=None, step=1: -1,
        torch.selu: lambda input, inplace=False: -1,
        torch.sigmoid: lambda input, out=None: -1,
        torch.sign: lambda input, out=None: -1,
        torch.signbit: lambda input, out=None: -1,
        torch.sgn: lambda input, out=None: -1,
        torch.sin: lambda input, out=None: -1,
        torch.sinc: lambda input, out=None: -1,
        torch.sinh: lambda input, out=None: -1,
        torch.slogdet: lambda input: -1,
        torch.linalg.slogdet: lambda input: -1,
        torch.smm: lambda input, mat2: -1,
        torch.spmm: lambda input, mat2: -1,
        torch.softmax: lambda input, dim, dtype=None: -1,
        torch.linalg.solve: lambda A, B, left=True, out=None: -1,
        torch.linalg.solve_ex: lambda A, B, left=True, check_errors=False, out=None: -1,
        torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
        torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
        torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
        torch.sqrt: lambda input, out=None: -1,
        torch.square: lambda input, out=None: -1,
        torch.squeeze: lambda input, dim=None, out=None: -1,
        torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
        torch.stack: lambda tensors, dim=0, out=None: -1,
        torch.std: lambda input, dim=None: -1,
        torch.std_mean: lambda input, dim=None: -1,
        torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
                     pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
        torch.sub: lambda input, other, out=None: -1,
        torch.subtract: lambda input, other, out=None: -1,
        torch.sum: lambda input, dim=None: -1,
        torch.sym_float: lambda input: -1,
        torch.sym_int: lambda input: -1,
        torch.sym_max: lambda a, b: -1,
        torch.sym_min: lambda a, b: -1,
        torch.sym_not: lambda input: -1,
        torch.sym_ite: lambda a, b, c: -1,
        torch._sym_sqrt: lambda input: -1,
        torch._sym_cos: lambda input: -1,
        torch._sym_cosh: lambda input: -1,
        torch._sym_sin: lambda input: -1,
        torch._sym_sinh: lambda input: -1,
        torch._sym_tan: lambda input: -1,
        torch._sym_tanh: lambda input: -1,
        torch._sym_asin: lambda input: -1,
        torch._sym_acos: lambda input: -1,
        torch._sym_atan: lambda input: -1,
        torch.nansum: lambda input, dim=None: -1,
        torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
        torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
        torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
        torch.linalg.svdvals: lambda input, out=None: -1,
        torch.swapaxes: lambda input, dim0, dim1: -1,
        torch.swapdims: lambda input, axis0, axis1: -1,
        torch.special.airy_ai: lambda input: -1,
        torch.special.bessel_j0: lambda input: -1,
        torch.special.bessel_j1: lambda input: -1,
        torch.special.bessel_y0: lambda input: -1,
        torch.special.bessel_y1: lambda input: -1,
        torch.special.chebyshev_polynomial_t: lambda input, n, out=None: -1,
        torch.special.chebyshev_polynomial_u: lambda input, n, out=None: -1,
        torch.special.chebyshev_polynomial_v: lambda input, n, out=None: -1,
        torch.special.chebyshev_polynomial_w: lambda input, n, out=None: -1,
        torch.special.digamma: lambda input: -1,
        torch.special.entr: lambda input: -1,
        torch.special.erf: lambda input: -1,
        torch.special.erfc: lambda input: -1,
        torch.special.erfcx: lambda input: -1,
        torch.special.erfinv: lambda input: -1,
        torch.special.exp2: lambda input: -1,
        torch.special.expit: lambda input: -1,
        torch.special.expm1: lambda input: -1,
        torch.special.gammainc: lambda input, other, out=None: -1,
        torch.special.gammaincc: lambda input, other, out=None: -1,
        torch.special.gammaln: lambda input: -1,
        torch.special.hermite_polynomial_h: lambda input, n, out=None: -1,
        torch.special.hermite_polynomial_he: lambda input, n, out=None: -1,
        torch.special.i0: lambda input: -1,
        torch.special.i0e: lambda input: -1,
        torch.special.i1: lambda input: -1,
        torch.special.i1e: lambda input: -1,
        torch.special.laguerre_polynomial_l: lambda input, n, out=None: -1,
        torch.special.legendre_polynomial_p: lambda input, n, out=None: -1,
        torch.special.log1p: lambda input: -1,
        torch.special.log_ndtr: lambda input: -1,
        torch.special.log_softmax: lambda input, dim, dtype=None: -1,
        torch.special.logit: lambda input: -1,
        torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1,
        torch.special.modified_bessel_i0: lambda input: -1,
        torch.special.modified_bessel_i1: lambda input: -1,
        torch.special.modified_bessel_k0: lambda input: -1,
        torch.special.modified_bessel_k1: lambda input: -1,
        torch.special.multigammaln: lambda input, p: -1,
        torch.special.ndtr: lambda input: -1,
        torch.special.ndtri: lambda input: -1,
        torch.special.polygamma: lambda input, n, out=None: -1,
        torch.special.psi: lambda input: -1,
        torch.special.round: lambda input: -1,
        torch.special.scaled_modified_bessel_k0: lambda input: -1,
        torch.special.scaled_modified_bessel_k1: lambda input: -1,
        torch.special.shifted_chebyshev_polynomial_t: lambda input, n, out=None: -1,
        torch.special.shifted_chebyshev_polynomial_u: lambda input, n, out=None: -1,
        torch.special.shifted_chebyshev_polynomial_v: lambda input, n, out=None: -1,
        torch.special.shifted_chebyshev_polynomial_w: lambda input, n, out=None: -1,
        torch.special.sinc: lambda input: -1,
        torch.special.softmax: lambda input, dim, dtype=None: -1,
        torch.special.spherical_bessel_j0: lambda input: -1,
        torch.special.xlog1py: lambda input, other, out=None: -1,
        torch.special.xlogy: lambda input, other, out=None: -1,
        torch.special.zeta: lambda self, other, out=None: -1,
        torch.t: lambda input: -1,
        torch.take: lambda input, index: -1,
        torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
        torch.tan: lambda input, out=None: -1,
        torch.tanh: lambda input, out=None: -1,
        torch.linalg.tensorinv: lambda a, ind=2: -1,
        torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
        torch.tensordot: lambda a, b, dims=2, out=None: -1,
        torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
        torch.threshold: lambda input, threshold, value, inplace=False: -1,
        torch.tile: lambda input, dims: -1,
        torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
        torch.trace: lambda input: -1,
        torch.transpose: lambda input, dim0, dim1: -1,
        torch.trapz: lambda y, x=None, dim=-1: -1,
        torch.trapezoid: lambda y, x=None, dim=-1: -1,
        torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
        torch.linalg.solve_triangular: lambda input, B, upper, left=True, unitriangular=False: -1,
        torch.tril: lambda input, diagonal=0, out=None: -1,
        torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
                                    size_average=None, reduce=None, reduction='mean': -1),
        torch.triu: lambda input, diagonal=0, out=None: -1,
        torch.true_divide: lambda input, other: -1,
        torch.trunc: lambda input, out=None: -1,
        torch.unbind: lambda input, dim=0: -1,
        torch.unflatten: lambda input, dim, sizes, names: -1,
        torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
        torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
        torch.unravel_index: lambda indices, shape: -1,
        torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
        torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
        torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
        torch.unsqueeze: lambda input, dim, out=None: -1,
        torch.linalg.vander: lambda x, N=None: -1,
        torch.var: lambda input, dim=None: -1,
        torch.var_mean: lambda input, dim=None: -1,
        torch.vsplit: lambda input, indices_or_sections: -1,
        torch.vstack: lambda tensors, out=None: -1,
        torch.where: lambda condition, x=None, y=None: -1,
        torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
        torch._fw_primal_copy: lambda self, level: -1,
        torch._make_dual_copy: lambda primal, tangent, level: -1,
        torch.view_as_real_copy: lambda self: -1,
        torch.view_as_complex_copy: lambda self: -1,
        torch._conj_copy: lambda self: -1,
        torch._neg_view_copy: lambda self: -1,
        torch.as_strided_copy: lambda self, size, stride, storage_offset=None: -1,
        torch._sparse_broadcast_to_copy: lambda self, size: -1,
        torch.diagonal_copy: lambda self, offset=0, dim1=0, dim2=1: -1,
        torch.expand_copy: lambda self, size, *, implicit=False: -1,
        torch.narrow_copy: lambda self, dim, start, length: -1,
        torch.permute_copy: lambda self, dims: -1,
        torch._reshape_alias_copy: lambda self, size, stride: -1,
        torch.select_copy: lambda self, dim, index: -1,
        torch.detach_copy: lambda self: -1,
        torch.slice_copy: lambda self, dim=0, start=None, end=None, step=1: -1,
        torch.split_copy: lambda self, split_size, dim=0: -1,
        torch.split_with_sizes_copy: lambda self, split_sizes, dim=0: -1,
        torch.squeeze_copy: lambda self, dim: -1,
        torch.t_copy: lambda self: -1,
        torch.transpose_copy: lambda self, dim0, dim1: -1,
        torch.unsqueeze_copy: lambda self, dim: -1,
        torch._indices_copy: lambda self: -1,
        torch._values_copy: lambda self: -1,
        torch.indices_copy: lambda self: -1,
        torch.values_copy: lambda self: -1,
        torch.crow_indices_copy: lambda self: -1,
        torch.col_indices_copy: lambda self: -1,
        torch.ccol_indices_copy: lambda self: -1,
        torch.row_indices_copy: lambda self: -1,
        torch.unbind_copy: lambda self, dim=0: -1,
        torch.view_copy: lambda self, dtype: -1,
        torch.unfold_copy: lambda self, dimension, size, step: -1,
        torch.alias_copy: lambda self: -1,
        Tensor.__floordiv__: lambda self, other: -1,
        Tensor.__rfloordiv__: lambda self, other: -1,
        Tensor.__ifloordiv__: lambda self, other: -1,
        Tensor.__truediv__: lambda self, other: -1,
        Tensor.__rtruediv__: lambda self, other: -1,
        Tensor.__itruediv__: lambda self, other: -1,
        Tensor.__lshift__: lambda self, other: -1,
        Tensor.__rlshift__: lambda self, other: -1,
        Tensor.__ilshift__: lambda self, other: -1,
        Tensor.__rshift__: lambda self, other: -1,
        Tensor.__rrshift__: lambda self, other: -1,
        Tensor.__irshift__: lambda self, other: -1,
        Tensor.__and__: lambda self, other: -1,
        Tensor.__or__: lambda self, other: -1,
        Tensor.__xor__: lambda self, other: -1,
        Tensor.__float__: lambda self: -1,
        Tensor.__complex__: lambda self: -1,
        Tensor.__array__: lambda self, dtype: -1,
        Tensor.__bool__: lambda self: -1,
        Tensor.__contains__: lambda self, other: -1,
        Tensor.__neg__: lambda self: -1,
        Tensor.__invert__: lambda self: -1,
        Tensor.__mod__: lambda self, other: -1,
        Tensor.__rmod__: lambda self, other: -1,
        Tensor.__imod__: lambda self, other: -1,
        Tensor.__array_wrap__: lambda self, array: -1,
        Tensor.__getitem__: lambda self, idx: -1,
        Tensor.__deepcopy__: lambda self, memo: -1,
        Tensor.__int__: lambda self: -1,
        Tensor.__long__: lambda self: -1,
        Tensor.__index__: lambda self: -1,
        Tensor.__len__: lambda self: -1,
        Tensor.__format__: lambda self, format_spec: -1,
        Tensor.__reduce_ex__: lambda self, proto: -1,
        Tensor.__reversed__: lambda self: -1,
        Tensor.__repr__: lambda self, *, tensor_contents=None: -1,
        Tensor.__setitem__: lambda self, k, v: -1,
        Tensor.__setstate__: lambda self, d: -1,
        Tensor.T.__get__: lambda self: -1,
        Tensor.H.__get__: lambda self: -1,
        Tensor.mT.__get__: lambda self: -1,
        Tensor.mH.__get__: lambda self: -1,
        Tensor._backward_hooks.__get__: lambda self: -1,
        Tensor._post_accumulate_grad_hooks.__get__: lambda self: -1,
        Tensor._base.__get__: lambda self: -1,
        Tensor._cdata.__get__: lambda self: -1,
        Tensor.grad.__get__: lambda self: -1,
        Tensor._grad.__get__: lambda self: -1,
        Tensor._grad_fn.__get__: lambda self: -1,
        Tensor.grad_fn.__get__: lambda self: -1,
        Tensor._version.__get__: lambda self: -1,
        Tensor._autocast_to_reduced_precision: lambda self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype: -1,
        Tensor._autocast_to_full_precision: lambda self, cuda_enabled, cpu_enabled: -1,
        Tensor.data.__get__: lambda self: -1,
        Tensor.device.__get__: lambda self: -1,
        Tensor.dtype.__get__: lambda self: -1,
        Tensor.is_cuda.__get__: lambda self: -1,
        Tensor.is_cpu.__get__: lambda self: -1,
        Tensor.is_xla.__get__: lambda self: -1,
        Tensor.is_xpu.__get__: lambda self: -1,
        Tensor.is_ipu.__get__: lambda self: -1,
        Tensor.is_leaf.__get__: lambda self: -1,
        Tensor.retains_grad.__get__: lambda self: -1,
        Tensor.is_meta.__get__: lambda self: -1,
        Tensor.is_mps.__get__: lambda self: -1,
        Tensor.is_mtia.__get__: lambda self: -1,
        Tensor.is_nested.__get__: lambda self: -1,
        Tensor.is_ort.__get__: lambda self: -1,
        Tensor.is_mkldnn.__get__: lambda self: -1,
        Tensor.is_quantized.__get__: lambda self: -1,
        Tensor.is_sparse.__get__: lambda self: -1,
        Tensor.is_sparse_csr.__get__: lambda self: -1,
        Tensor.is_vulkan.__get__: lambda self: -1,
        Tensor.itemsize.__get__: lambda self: -1,
        Tensor.layout.__get__: lambda self: -1,
        Tensor.name.__get__: lambda self: -1,
        Tensor.names.__get__: lambda self: -1,
        Tensor.nbytes.__get__: lambda self: -1,
        Tensor.ndim.__get__: lambda self: -1,
        Tensor.output_nr.__get__: lambda self: -1,
        Tensor.requires_grad.__get__: lambda self: -1,
        Tensor.shape.__get__: lambda self: -1,
        Tensor.volatile.__get__: lambda self: -1,
        Tensor.real.__get__: lambda self: -1,
        Tensor.imag.__get__: lambda self: -1,
        Tensor.__cuda_array_interface__.__get__: lambda self: -1,
        Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
        Tensor._dimI: lambda self: -1,
        Tensor._dimV: lambda self: -1,
        Tensor._indices: lambda self: -1,
        Tensor._is_view: lambda self: -1,
        Tensor._nnz: lambda self: -1,
        Tensor.crow_indices: lambda self: -1,
        Tensor.col_indices: lambda self: -1,
        Tensor.ccol_indices: lambda self: -1,
        Tensor.row_indices: lambda self: -1,
        Tensor._update_names: lambda self, names, inplace: -1,
        Tensor._values: lambda self: -1,
        Tensor.adjoint: lambda self: -1,
        Tensor.align_as: lambda self, other: -1,
        Tensor.align_to: lambda self, order, ellipsis_idx: -1,
        Tensor.apply_: lambda self, callable: -1,
        Tensor.as_strided: lambda self, size, stride: -1,
        Tensor.as_strided_: lambda self, size, stride: -1,
        Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
        Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
        Tensor.coalesce: lambda self: -1,
        Tensor._coalesced_: lambda self, coalesced: -1,
        Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
        Tensor.copy_: lambda self, src, non_blocking=False: -1,
        Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.ipu: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.data_ptr: lambda self: -1,
        Tensor.dense_dim: lambda self: -1,
        Tensor.diagonal_scatter: lambda self, src, offset=0, dim1=0, dim2=1: -1,
        Tensor.dim: lambda self: -1,
        Tensor.dim_order: lambda self: -1,
        Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.element_size: lambda self: -1,
        Tensor.expand: lambda self, size: -1,
        Tensor.expand_as: lambda self, other: -1,
        Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
        Tensor.fill_: lambda self, value: -1,
        Tensor.fill_diagonal_: lambda self, value: -1,
        Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.geometric_: lambda self, p, *, generator=None: -1,
        Tensor.get_device: lambda self: -1,
        Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.chalf: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.has_names: lambda self: -1,
        Tensor.indices: lambda self: -1,
        Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.is_coalesced: lambda self: -1,
        Tensor.is_contiguous: lambda self: -1,
        Tensor.is_inference: lambda self: -1,
        Tensor.is_pinned: lambda self: -1,
        Tensor.is_set_to: lambda self, tensor: -1,
        Tensor.is_shared: lambda self: -1,
        Tensor.item: lambda self: -1,
        Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
        Tensor.log_softmax: lambda self, dim: -1,
        Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.map_: lambda self, tensor, callable: -1,
        Tensor.map2_: lambda self, x, y, callable: -1,
        Tensor.mm: lambda self, mat2: -1,
        Tensor.module_load: lambda self, other, assign=False: -1,
        Tensor.narrow_copy: lambda self, dimension, start, length: -1,
        Tensor.ndimension: lambda self: -1,
        Tensor.nelement: lambda self: -1,
        Tensor._nested_tensor_size: lambda self: -1,
        Tensor._nested_tensor_storage_offsets: lambda self: -1,
        Tensor._nested_tensor_strides: lambda self: -1,
        Tensor.normal_: lambda self: -1,
        Tensor.numpy: lambda self: -1,
        Tensor.permute: lambda self, dim: -1,
        Tensor.pin_memory: lambda self: -1,
        Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
        Tensor.qscheme: lambda self: -1,
        Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
        Tensor.record_stream: lambda self, stream: -1,
        Tensor.refine_names: lambda self, names: -1,
        Tensor.register_hook: lambda self, hook: -1,
        Tensor.register_post_accumulate_grad_hook: lambda self, hook: -1,
        Tensor.rename: lambda self, name: -1,
        Tensor.repeat: lambda self, *size: -1,
        Tensor.requires_grad_: lambda self, requires_grad=True: -1,
        Tensor.reshape_as: lambda self, other: -1,
        Tensor.resize: lambda self, *size: -1,
        Tensor.resize_: lambda self, size: -1,
        Tensor.resize_as: lambda self, other: -1,
        Tensor.resize_as_sparse_: lambda self, other: -1,
        Tensor.retain_grad: lambda self: -1,
        Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
        Tensor.select_scatter: lambda self, src, dim, index: -1,
        Tensor.share_memory_: lambda self: -1,
        Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
        Tensor.size: lambda self: -1,
        Tensor.slice_scatter: lambda self, src, dim=0, start=None, end=None, step=1: -1,
        Tensor.sparse_dim: lambda self: -1,
        Tensor.sparse_mask: lambda self, mask: -1,
        Tensor._sparse_mask_projection: lambda self, mask, accumulate_matches=False: -1,
        Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
        Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
_dim:-1,Tensor.sspaddmm:lambdaself,mat1,mat2,beta=1,alpha=1,out=None:-1,Tensor.storage:lambdaself:-1,Tensor.untyped_storage:lambdaself:-1,Tensor.storage_offset:lambdaself:-1,Tensor.storage_type:lambdaself:-1,Tensor.sum_to_size:lambdaself,size:-1,Tensor.tile:lambdaself,*reps:-1,Tensor.to:lambdaself,dtype,non_blocking=False,copy=False,memory_format=torch.preserve_format:-1,Tensor.to_dense:lambdaself,dtype=None,*,masked_grad=None:-1,Tensor._to_dense:lambdaself,dtype=None,masked_grad=None:-1,Tensor.to_sparse:lambdaself:-1,Tensor.tolist:lambdaself:-1,Tensor.to_mkldnn:lambdaself:-1,Tensor.type_as:lambdaself,other:-1,Tensor.unfold:lambdaself,dimension,size,step:-1,Tensor.uniform_:lambdaself,from_=0,to=1:-1,Tensor.values:lambdaself:-1,Tensor.view:lambdaself,shape:-1,Tensor.view_as:lambdaself,other:-1,Tensor.zero_:lambdaself:-1,Tensor.__dlpack__:lambdaself,stream=None:-1,Tensor.__dlpack_device__:lambdaself:-1,torch.linalg.lstsq:lambdaself,b,cond=None,driver=None:-1,}ret2={}ignored=get_ignored_functions()fork,vinret.items():# Generate methods like __add__ and add_ by default from addnames=[k.__name__,# Default methodk.__name__+"_",# Inplace variant"__"+k.__name__+"__",# Dunder method"__i"+k.__name__+"__",# Inplace dunder method"__r"+k.__name__+"__",# Reverse dunder method]ifk.__name__.startswith("bitwise_"):# bitwise_<op> have dunder methods of the form __<op>__# And so on.subname=k.__name__[len("bitwise_"):]names.extend(["__"+subname+"__","__i"+subname+"__","__r"+subname+"__"])fornameinnames:func=getattr(Tensor,name,None)ifcallable(func)andfuncnotinretandfuncnotinignored:ret2[func]=vret.update(ret2)returnret
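# Coverage sketch (illustrative, not part of the original module): every key
# in the returned dict maps to a lambda with the same signature as the real
# API, and dunder/inplace variants are generated from the base name by the
# loop above, so ``Tensor.__add__`` is covered even though only the plain
# ``add`` entry is written out by hand.
overrides = get_testing_overrides()
assert torch.add in overrides                 # explicit entry
assert torch.Tensor.__add__ in overrides      # generated dunder variant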
def wrap_torch_function(dispatcher: Callable):
    """Wraps a given function with ``__torch_function__``-related functionality.

    Parameters
    ----------
    dispatcher: Callable
        A callable that returns an iterable of Tensor-likes passed into the
        function.

    Note
    ----
    This decorator may reduce the performance of your code. Generally, it's
    enough to express your code as a series of functions that, themselves,
    support __torch_function__. If you find yourself in the rare situation
    where this is not the case, e.g. if you're wrapping a low-level library
    and you also need it to work for Tensor-likes, then this function is
    available.

    Examples
    --------
    >>> def dispatcher(a):  # Must have the same signature as func
    ...     return (a,)
    >>> @torch.overrides.wrap_torch_function(dispatcher)
    ... def func(a):  # This will make func dispatchable by __torch_function__
    ...     return a + 0
    """
    def inner(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            relevant_args = dispatcher(*args, **kwargs)
            if has_torch_function(relevant_args):
                return handle_torch_function(wrapped, relevant_args, *args, **kwargs)
            return func(*args, **kwargs)

        return wrapped

    return inner
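# Usage sketch (illustrative, not part of the original module): once a
# function is wrapped as in the docstring above, a Tensor-like that
# implements ``__torch_function__`` is routed through its override instead
# of the wrapped body. ``MyTensorLike`` and ``plus_zero`` are hypothetical.
class MyTensorLike:
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        return "intercepted"


@wrap_torch_function(lambda a: (a,))
def plus_zero(a):
    return a + 0


assert plus_zero(1) == 1                        # plain values fall through
assert plus_zero(MyTensorLike()) == "intercepted"  # Tensor-likes are dispatched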
def _get_overloaded_args(
    relevant_args: Iterable[Any],
    get_type_fn: Callable[[Any], Type] = None,
) -> List[Any]:
    """Returns a list of arguments on which to call __torch_function__.

    Checks arguments in relevant_args for __torch_function__ implementations,
    storing references to the arguments and their types in overloaded_args and
    overloaded_types in order of calling precedence. Only distinct types are
    considered. If a type is a subclass of another type it will have higher
    precedence, otherwise the precedence order is the same as the order of
    arguments in relevant_args, that is, from left-to-right in the argument
    list.

    The precedence-determining algorithm implemented in this function is
    described in `NEP-0018`_.

    See torch::append_overloaded_arg for the equivalent function in the C++
    implementation.

    Parameters
    ----------
    relevant_args : iterable of array-like
        Iterable of array-like arguments to check for __torch_function__
        methods.
    get_type_fn : callable, optional
        Function to call on each argument in relevant_args to get its type.

    Returns
    -------
    overloaded_args : list
        Arguments from relevant_args on which to call __torch_function__
        methods, in the order in which they should be called.

    .. _NEP-0018:
       https://numpy.org/neps/nep-0018-array-function-protocol.html
    """
    if get_type_fn is None:
        get_type_fn = type

    # If torch function is not enabled, there are no overloaded types
    if not torch._C._is_torch_function_enabled():
        return []
    # Runtime is O(num_arguments * num_unique_types)
    overloaded_types: Set[Type] = set()
    overloaded_args: List[Any] = []
    for arg in relevant_args:
        arg_type = get_type_fn(arg)
        # We only collect arguments if they have a unique type, which ensures
        # reasonable performance even with a long list of possibly overloaded
        # arguments.
        #
        # NB: Important to exclude _disabled_torch_function_impl, otherwise
        # https://github.com/pytorch/pytorch/issues/64687
        if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and
                arg_type.__torch_function__ != torch._C._disabled_torch_function_impl):
            # Create lists explicitly for the first type (usually the only one
            # done) to avoid setting up the iterator for overloaded_args.
            if overloaded_types:
                overloaded_types.add(arg_type)
                # By default, insert argument at the end, but if it is
                # subclass of another argument, insert it before that argument.
                # This ensures "subclasses before superclasses".
                index = len(overloaded_args)
                for i, old_arg in enumerate(overloaded_args):
                    if issubclass(arg_type, get_type_fn(old_arg)):
                        index = i
                        break
                overloaded_args.insert(index, arg)
            else:
                overloaded_types = {arg_type}
                overloaded_args = [arg]
    return overloaded_args
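# Precedence sketch (illustrative, not part of the original module): a
# subclass is ordered before its parent even when it appears later in the
# argument list, matching the "subclasses before superclasses" rule above.
# ``Base`` and ``Sub`` are hypothetical Tensor-likes.
class Base:
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        return NotImplemented


class Sub(Base):
    pass


base, sub = Base(), Sub()
# Sub is tried first despite appearing second in relevant_args.
assert [type(a) for a in _get_overloaded_args([base, sub])] == [Sub, Base]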
def handle_torch_function(
    public_api: Callable,
    relevant_args: Iterable[Any],
    *args,
    **kwargs,
) -> Any:
    """Implement a function with checks for ``__torch_function__`` overrides.

    See torch::autograd::handle_torch_function for the equivalent of this
    function in the C++ implementation.

    Arguments
    ---------
    public_api : function
        Function exposed by the public torch API originally called like
        ``public_api(*args, **kwargs)`` on which arguments are now being
        checked.
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.
    args : tuple
        Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : tuple
        Arbitrary keyword arguments originally passed into ``public_api``.

    Returns
    -------
    object
        Result from calling ``implementation`` or an ``__torch_function__``
        method, as appropriate.

    Raises
    ------
    TypeError : if no implementation is found.

    Example
    -------
    >>> def func(a):
    ...     if has_torch_function_unary(a):
    ...         return handle_torch_function(func, (a,), a)
    ...     return a + 0
    """
    # Check for __torch_function__ methods.
    overloaded_args = _get_overloaded_args(relevant_args)
    # overloaded_args already have unique types.
    types = tuple(map(type, overloaded_args))

    # Check for __torch_function__ mode.
    if _is_torch_function_mode_enabled():
        # if we're here, the mode must be set to a TorchFunctionStackMode
        # this unsets it and calls directly into TorchFunctionStackMode's torch function
        with _pop_mode_temporarily() as mode:
            result = mode.__torch_function__(public_api, types, args, kwargs)
        if result is not NotImplemented:
            return result

    # Call overrides
    for overloaded_arg in overloaded_args:
        # This call needs to become a classmethod call in the future.
        # See https://github.com/pytorch/pytorch/issues/63767
        torch_func_method = overloaded_arg.__torch_function__
        if hasattr(torch_func_method, "__self__") and torch_func_method.__self__ is overloaded_arg and \
                torch_func_method is not torch._C._disabled_torch_function_impl:
            warnings.warn("Defining your `__torch_function__` as a plain method is deprecated "
                          "and will be an error in future, please define it as a classmethod.",
                          DeprecationWarning)

        # Use `public_api` instead of `implementation` so __torch_function__
        # implementations can do equality/identity comparisons.
        result = torch_func_method(public_api, types, args, kwargs)

        if result is not NotImplemented:
            return result

    func_name = f'{public_api.__module__}.{public_api.__name__}'
    msg = (
        f"no implementation found for '{func_name}' on types that implement "
        f'__torch_function__: {[type(arg) for arg in overloaded_args]}'
    )
    if _is_torch_function_mode_enabled():
        msg += f" nor in mode {_get_current_function_mode()}"
    raise TypeError(msg)
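# End-to-end sketch (illustrative, not part of the original module): a
# Tensor-like whose override is reached via the guard pattern from the
# docstring above. ``ScalarLike`` and ``double_it`` are hypothetical names.
class ScalarLike:
    def __init__(self, value):
        self.value = value

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        if func is double_it:
            return ScalarLike(args[0].value * 2)
        return NotImplemented


def double_it(a):
    if has_torch_function_unary(a):
        return handle_torch_function(double_it, (a,), a)
    return a * 2


assert double_it(3) == 6                    # plain path
assert double_it(ScalarLike(3)).value == 6  # routed through __torch_function__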
has_torch_function = _add_docstr(
    _has_torch_function,
    r"""Check for __torch_function__ implementations in the elements of an
    iterable or if a __torch_function__ mode is enabled.  Considers exact
    ``Tensor`` s and ``Parameter`` s non-dispatchable.  Use this to guard a
    call to :func:`handle_torch_function`; don't use it to test if something
    is Tensor-like, use :func:`is_tensor_like` instead.

    Arguments
    ---------
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.

    Returns
    -------
    bool
        True if any of the elements of relevant_args have __torch_function__
        implementations, False otherwise.

    See Also
    --------
    torch.is_tensor_like
        Checks if something is a Tensor-like, including an exact ``Tensor``.
    """
)

has_torch_function_unary = _add_docstr(
    _has_torch_function_unary,
    r"""Special case of `has_torch_function` for single inputs.

    Instead of:
      `has_torch_function((t,))`
    call:
      `has_torch_function_unary(t)`
    which skips unnecessary packing and unpacking work.
    """
)

has_torch_function_variadic = _add_docstr(
    _has_torch_function_variadic,
    r"""Special case of `has_torch_function` that skips tuple creation.

    This uses the METH_FASTCALL protocol introduced in Python 3.7.

    Instead of:
      `has_torch_function((a, b))`
    call:
      `has_torch_function_variadic(a, b)`
    which skips unnecessary packing and unpacking work.
    """
)


@functools.lru_cache(None)
def _get_overridable_functions() -> Tuple[Dict[Any, List[Callable]], Dict[Callable, str]]:
    overridable_funcs = collections.defaultdict(list)
    index = {}
    tested_namespaces = [
        ("torch", torch, torch.__all__),
        ("torch.functional", torch.functional, torch.functional.__all__),
        ("torch.nn.functional", torch.nn.functional, dir(torch.nn.functional)),
        ("torch.nn.init", torch.nn.init, dir(torch.nn.init)),
        ("torch.Tensor", torch.Tensor, dir(torch.Tensor)),
        ("torch.linalg", torch.linalg, dir(torch.linalg)),
        ("torch.fft", torch.fft, dir(torch.fft)),
        ("torch.special", torch.special, dir(torch.special)),
    ]
    for namespace_str, namespace, ns_funcs in tested_namespaces:
        for func_name in ns_funcs:
            ignore = False
            # ignore private functions or functions that are deleted in torch.__init__
            if namespace is not torch.Tensor:
                if func_name.startswith('__'):
                    continue
                elif func_name.startswith('_'):
                    ignore = True
                elif func_name.endswith('_'):
                    ignore = True
                elif not func_name[0].islower():
                    ignore = True
                elif func_name == 'unique_dim':
                    continue
            else:
                func = getattr(namespace, func_name)
                if getattr(object, func_name, None) == func:
                    continue
                if func_name == '__weakref__':
                    continue
            func = getattr(namespace, func_name)
            if namespace is torch.Tensor and getattr(object, func_name, None) == func:
                continue
            # ignore re-exported modules
            if isinstance(func, types.ModuleType):
                continue
            # ignore __future__ imports
            if isinstance(func, __future__._Feature):
                continue

            if not callable(func) and hasattr(func, "__get__"):
                index[func.__get__] = f"{namespace_str}.{func_name}.__get__"
                index[func.__set__] = f"{namespace_str}.{func_name}.__set__"
                if ignore:
                    continue
                if func.__get__ in get_ignored_functions():
                    msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                           "but still has an explicit override")
                    assert func.__get__ not in get_testing_overrides(), msg.format(namespace, func.__name__)
                    continue
                else:
                    overridable_funcs[func].append(func.__get__)
                    continue

            if not callable(func):
                continue

            index[func] = f"{namespace_str}.{func_name}"

            if ignore:
                continue

            # cannot be overridden by __torch_function__
            if func in get_ignored_functions():
                msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                       "but still has an explicit override")
                assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
                continue
            overridable_funcs[namespace].append(func)
    return overridable_funcs, index
@_disable_user_warnings
def get_overridable_functions() -> Dict[Any, List[Callable]]:
    """List functions that are overridable via __torch_function__

    Returns
    -------
    Dict[Any, List[Callable]]
        A dictionary that maps namespaces that contain overridable functions
        to functions in that namespace that can be overridden.
    """
    return _get_overridable_functions()[0]
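# Usage sketch (illustrative, not part of the original module): the dict is
# keyed by namespace, so overridable functions and Tensor methods live under
# separate keys.
funcs = get_overridable_functions()
assert torch.add in funcs[torch]
assert torch.Tensor.add in funcs[torch.Tensor]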
@_disable_user_warnings
def resolve_name(f):
    """Get a human readable string name for a function passed to
    __torch_function__

    Arguments
    ---------
    f : Callable
        Function to resolve the name of.

    Returns
    -------
    str
        Name of the function; if eval'ed it should give back the input
        function.
    """
    if isinstance(f, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
        return str(f)
    return _get_overridable_functions()[1].get(f)
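# Usage sketch (illustrative, not part of the original module): the resolved
# name is the eval-able path into the torch namespace.
assert resolve_name(torch.add) == "torch.add"
assert resolve_name(torch.Tensor.add) == "torch.Tensor.add"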
@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
    """Returns a set of the overridable methods on ``torch.Tensor``"""
    overridable_funcs = get_overridable_functions()
    methods = set(overridable_funcs[torch.Tensor])
    return methods
@_disable_user_warnings
def is_tensor_method_or_property(func: Callable) -> bool:
    """
    Returns True if the function passed in is a handler for a
    method or property belonging to ``torch.Tensor``, as passed
    into ``__torch_function__``.

    .. note::
       For properties, their ``__get__`` method must be passed in.

    This may be needed, in particular, for the following reasons:

    1. Methods/properties sometimes don't contain a `__module__` slot.
    2. They require that the first passed-in argument is an instance
       of ``torch.Tensor``.

    Examples
    --------
    >>> is_tensor_method_or_property(torch.Tensor.add)
    True
    >>> is_tensor_method_or_property(torch.add)
    False
    """
    return func in _get_tensor_methods() or func.__name__ == "__get__"
def is_tensor_like(inp):
    """
    Returns ``True`` if the passed-in input is a Tensor-like.

    Currently, this occurs whenever there's a ``__torch_function__``
    attribute on the type of the input.

    Examples
    --------
    A subclass of tensor is generally a Tensor-like.

    >>> class SubTensor(torch.Tensor): ...
    >>> is_tensor_like(SubTensor([0]))
    True

    Built-in or user types aren't usually Tensor-like.

    >>> is_tensor_like(6)
    False
    >>> is_tensor_like(None)
    False
    >>> class NotATensor: ...
    >>> is_tensor_like(NotATensor())
    False

    But, they can be made Tensor-like by implementing __torch_function__.

    >>> class TensorLike:
    ...     @classmethod
    ...     def __torch_function__(cls, func, types, args, kwargs):
    ...         return -1
    >>> is_tensor_like(TensorLike())
    True
    """
    return type(inp) is torch.Tensor or hasattr(inp, "__torch_function__")
class TorchFunctionMode:
    """
    A ``TorchFunctionMode`` allows you to override the meaning of all
    ``__torch_function__`` overrideable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API.  Some common situations
    where you should use a mode:

        * You want to override the meaning of factory functions, or other
          functions that do not otherwise take a tensor as an argument
          (these cannot be overridden with tensor subclasses).

        * You want to override the behavior of all functions without needing
          to wrap your inputs in tensor subclasses; e.g., if you are just
          interested in logging intermediate computations.

        * You want to control the order of execution of various tensor
          subclasses explicitly, rather than implicitly via the return of
          ``NotImplemented``.

    Independent subclasses of :class:`TorchFunctionMode` are compositional:
    modes can be pushed onto a stack using ``with MyMode():``.
    When you call functions in the PyTorch API inside your
    ``__torch_function__`` implementation, by default, they will forward on to
    the next mode on the mode stack.  If you want to recursively call back into
    your current ``__torch_function__`` implementation, either explicitly
    invoke ``self.__torch_function__(...)``, or use the context manager
    ``enable_torch_function_mode(self, replace=self.inner)`` to make the
    PyTorch API self-referential (beware of infinite loops, in this case!)
    """

    inner: "TorchFunctionMode"

    # Force metaclass to generate constructor at the base of the hierarchy
    def __init__(self):
        pass

    def __torch_function__(self, func, types, args=(), kwargs=None):
        raise NotImplementedError()

    def __enter__(self):
        _push_mode(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _pop_mode()

    @classmethod
    def push(cls, *args, **kwargs):
        warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
        instance = cls(*args, **kwargs)
        return instance


def _get_current_function_mode():
    stack_len = _len_torch_function_stack()
    return _get_function_stack_at(stack_len - 1) if stack_len > 0 else None


def _get_current_function_mode_stack():
    stack_len = _len_torch_function_stack()
    return [_get_function_stack_at(i) for i in range(stack_len)]


def _push_mode(mode):
    _push_on_torch_function_stack(mode)


def _pop_mode():
    old = _pop_torch_function_stack()
    return old


@contextlib.contextmanager
def _pop_mode_temporarily():
    old = _pop_mode()
    try:
        yield old
    finally:
        _push_mode(old)


class BaseTorchFunctionMode(TorchFunctionMode):
    def __torch_function__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        return func(*args, **kwargs)


@contextlib.contextmanager
def enable_reentrant_dispatch():
    # NB: this can't simply be
    # `enable_reentrant_dispatch = torch._C._RestorePythonTLSSnapshot`
    # because:
    # 1. torch._C._RestorePythonTLSSnapshot is unavailable when this file
    #    initially gets imported.  Probably an import order thing.
    # 2. enable_reentrant_dispatch is technically public API; assigning
    #    it the object would change the __module__ to look private.
    with torch._C._RestorePythonTLSSnapshot():
        try:
            yield
        finally:
            pass
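# Mode sketch (illustrative, not part of the original module): a minimal
# logging mode built on the ``TorchFunctionMode`` protocol above. Inside the
# mode's ``__torch_function__``, calls into the PyTorch API forward to the
# next mode on the stack, so invoking ``func`` here does not recurse.
# ``LoggingMode`` is a hypothetical name.
class LoggingMode(TorchFunctionMode):
    def __torch_function__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        print(f"dispatching {resolve_name(func) or func}")
        return func(*args, **kwargs)


with LoggingMode():
    # Factory functions are intercepted too, which plain tensor subclasses
    # cannot do, since no tensor argument is involved.
    t = torch.ones(2) + 1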