[CI] 【Hackathon 9th Sprint No.19】NO.19 Supplement unit tests for functional modules by xunyoyo · Pull Request #5063 · PaddlePaddle/FastDeploy
# Minimal NumPy-backed stand-in for paddle.Tensor used by the unit tests.
# ``_FakePlace`` is assumed to be defined earlier in this test module (not shown in this excerpt).
from typing import Any, List, Optional

import numpy as np


class _FakeTensor:
    __array_priority__ = 1000

    def __init__(self, array: Any, dtype: Optional[str] = None, place: Optional[_FakePlace] = None) -> None:
        if isinstance(array, _FakeTensor):
            array = array.array
        if dtype is not None:
            self.array = np.array(array, dtype=dtype)
        else:
            self.array = np.array(array)
        self.place = place or _FakePlace(False)

    def __repr__(self) -> str:  # pragma: no cover - debug helper
        return f"_FakeTensor({self.array!r})"

    def __len__(self) -> int:
        return len(self.array)

    @property
    def dtype(self):  # pragma: no cover - compatibility helper
        return self.array.dtype

    @property
    def shape(self):
        return self.array.shape

    def numpy(self):
        return self.array

    def tolist(self):
        return self.array.tolist()

    def item(self):
        return self.array.item()

    def astype(self, dtype: str) -> "_FakeTensor":
        return _FakeTensor(self.array.astype(dtype), place=self.place)

    def unsqueeze(self, axis: int) -> "_FakeTensor":
        return _FakeTensor(np.expand_dims(self.array, axis=axis), place=self.place)

    def split(self, lengths: List[int]):
        # Split along the first axis into chunks of the given lengths.
        outputs = []
        start = 0
        for length in lengths:
            outputs.append(_FakeTensor(self.array[start : start + length], place=self.place))
            start += length
        return outputs

    def cuda(self) -> "_FakeTensor":
        return _FakeTensor(self.array.copy(), place=_FakePlace(True))

    def __getitem__(self, item):
        if isinstance(item, _FakeTensor):
            item = item.array
        result = self.array.__getitem__(item)
        if isinstance(result, np.ndarray):
            return _FakeTensor(result, place=self.place)
        return result

    def __setitem__(self, key, value):
        if isinstance(value, _FakeTensor):
            value = value.array
        self.array.__setitem__(key, value)

    def __iter__(self):
        # 1-D tensors yield Python scalars; higher-rank tensors yield row tensors.
        if self.array.ndim == 1:
            for value in self.array:
                yield value.item() if hasattr(value, "item") else value
        else:
            for row in self.array:
                yield _FakeTensor(row, place=self.place)

    def _binary_op(self, other: Any, op):
        other_array = other.array if isinstance(other, _FakeTensor) else other
        return _FakeTensor(op(self.array, other_array), place=self.place)

    def __add__(self, other):
        return self._binary_op(other, np.add)

    def __radd__(self, other):
        return self._binary_op(other, np.add)

    def __sub__(self, other):
        return self._binary_op(other, np.subtract)

    def __rsub__(self, other):
        return _FakeTensor(other, place=self.place)._binary_op(self, np.subtract)

    def __truediv__(self, other):
        return self._binary_op(other, np.divide)

    def __mul__(self, other):  # pragma: no cover - completeness helper
        return self._binary_op(other, np.multiply)

    def __eq__(self, other):
        # Mirrors paddle's elementwise comparison by returning a NumPy bool array.
        other_array = other.array if isinstance(other, _FakeTensor) else other
        return self.array == other_array
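As a quick sanity check of the shims above, a minimal unittest sketch (illustrative only, not part of the PR) might look like the following; it relies solely on the methods defined on _FakeTensor.

import unittest


class FakeTensorSmokeTest(unittest.TestCase):
    # Illustrative sketch only: exercises the arithmetic, split, unsqueeze and indexing shims.

    def test_basic_ops(self):
        t = _FakeTensor([1.0, 2.0, 3.0], dtype="float32")
        self.assertEqual((t + t).tolist(), [2.0, 4.0, 6.0])
        head, tail = t.split([1, 2])
        self.assertEqual(head.tolist(), [1.0])
        self.assertEqual(tail.tolist(), [2.0, 3.0])
        self.assertEqual(t.unsqueeze(0).shape, (1, 3))
        self.assertEqual(t[_FakeTensor([0, 2])].tolist(), [1.0, 3.0])


if __name__ == "__main__":
    unittest.main()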