ASSERT_EQ(flops, 360);
}
+// A tensor that requires grad cannot be inserted as a prim::Constant:
+// there is no way to serialize its autograd state, so tryInsertConstant
+// must refuse and return c10::nullopt.
+TEST(TestConstant, TensorGrad) {
+ auto graph = std::make_shared<Graph>();
+ IValue ten = torch::randn({3, 5}).requires_grad_(true);
+ auto con = tryInsertConstant(*graph, ten);
+ ASSERT_TRUE(con == c10::nullopt);
+}
+
TEST(TestMutation, Basic) {
auto graph = std::make_shared<Graph>();
std::unordered_map<std::string, Value*> vmap;
namespace jit {
// Returns true if the tensor may be embedded as a prim::Constant.
bool insertableTensor(const at::Tensor& ten) {
- return !ten.requires_grad();
+ // Bail if the tensor requires grad (we have no way of serializing
+ // gradients, and grad-requiring tensors are mutable) or if it has no
+ // storage (i.e. an opaque tensor, such as those used by MKL-DNN).
+ return !ten.requires_grad() && ten.has_storage();
}
bool insertableIValue(const IValue& ivalue) {
Node* n = g.create(prim::Constant);
if (val.isTensor()) {
at::Tensor ref = val.toTensor();
- if (!ref.has_storage()) {
- // bail if tensor has no storage i.e. opaque tensor used in MKLdnn.
+ if (!insertableTensor(val.toTensor())) {
n->destroy();
return c10::nullopt;
}