_(prim, AnyDefined) \
_(prim, FusedConcat) \
_(prim, ConstantChunk) \
- _(prim, NoneGenerator) \
_(prim, MMTreeReduce) \
_(prim, MMBatchSide) \
_(aten, warn) \
'int64_t?': 'int?',
'double': 'float',
'bool': 'bool',
- 'Generator': 'Generator',
+ 'Generator': 'Generator?',
}
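(Effect of this mapping change: any declaration with a required Generator formal now emits an optional one, so a plain None satisfies the schema. An illustration, using a schema that appears in the hunks below:

    before: aten::_standard_gamma(Tensor self, Generator generator) -> Tensor
    after:  aten::_standard_gamma(Tensor self, Generator? generator) -> Tensor

This is what removes the need for a dedicated prim::NoneGenerator node downstream.)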
bool Node::isNondeterministic() const {
static const OperatorSet nondeterministic_ops = {
"aten::dropout(Tensor input, float p, bool train) -> Tensor",
- "aten::_fused_dropout(Tensor self, float p, Generator generator) -> (Tensor, Tensor)",
- "aten::_standard_gamma(Tensor self, Generator generator) -> Tensor",
- "aten::bernoulli(Tensor self, *, Generator generator) -> Tensor",
- "aten::bernoulli(Tensor self, float p, *, Generator generator) -> Tensor",
- "aten::multinomial(Tensor self, int num_samples, bool replacement, *, Generator generator) -> Tensor",
- "aten::normal(Tensor mean, Tensor std, *, Generator generator) -> Tensor",
- "aten::normal(float mean, Tensor std, *, Generator generator) -> Tensor",
- "aten::normal(Tensor mean, float std, *, Generator generator) -> Tensor",
- "aten::poisson(Tensor self, Generator generator) -> Tensor",
- "aten::rrelu(Tensor self, Scalar lower, Scalar upper, bool training, Generator generator) -> Tensor",
- "aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator generator) -> Tensor",
+ "aten::_fused_dropout(Tensor self, float p, Generator? generator) -> (Tensor, Tensor)",
+ "aten::_standard_gamma(Tensor self, Generator? generator) -> Tensor",
+ "aten::bernoulli(Tensor self, *, Generator? generator) -> Tensor",
+ "aten::bernoulli(Tensor self, float p, *, Generator? generator) -> Tensor",
+ "aten::multinomial(Tensor self, int num_samples, bool replacement, *, Generator? generator) -> Tensor",
+ "aten::normal(Tensor mean, Tensor std, *, Generator? generator) -> Tensor",
+ "aten::normal(float mean, Tensor std, *, Generator? generator) -> Tensor",
+ "aten::normal(Tensor mean, float std, *, Generator? generator) -> Tensor",
+ "aten::poisson(Tensor self, Generator? generator) -> Tensor",
+ "aten::rrelu(Tensor self, Scalar lower, Scalar upper, bool training, Generator? generator) -> Tensor",
+ "aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, Generator? generator) -> Tensor",
"aten::rand(int[] size, *, int dtype, int layout, Device device) -> Tensor",
"aten::rand_like(Tensor self) -> Tensor",
"aten::rand_like(Tensor self, *, int dtype, int layout, Device device) -> Tensor",
return n;
}
-Node * Graph::createNoneGenerator() {
- auto n = create(prim::NoneGenerator);
- n->output()->setType(GeneratorType::get());
- return n;
-}
-
Node * Graph::createFusionGroup() {
auto n = create(prim::FusionGroup, 0);
n->g_(attr::Subgraph,std::make_shared<Graph>(current_scope()));
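(Call sites that used createNoneGenerator now build a typed None constant via createNone(GeneratorType::get()). A minimal sketch of what that replacement relies on; the body of createNone is an assumption inferred from its declaration comment in the header hunk below, not part of this diff:

    Node* Graph::createNone(TypePtr typ) {
      auto n = create(prim::None);
      // assumed: the output is a None carrying type Optional[typ], e.g.
      // Optional[Generator] when called with GeneratorType::get()
      n->output()->setType(OptionalType::create(std::move(typ)));
      return n;
    }
)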
TORCH_API Node* createNone(TypePtr typ); // value of None with type Optional[typ]
TORCH_API Node* createUndefined();
- TORCH_API Node* createNoneGenerator();
TORCH_API Node* createFusionGroup();
TORCH_API Node* createDifferentiableSubgraph();
TORCH_API Node* createTuple(at::ArrayRef<Value*> values);
prim::Loop, //TODO: handle Loop
prim::Constant,
prim::Undefined,
- prim::NoneGenerator,
prim::None, // it is already a constant and propagating it will lose
// important type information about which Optional type it is
// TODO (zach): we should consider skipping tensor factories in the cases
bool isConstantLike(Node* n) {
switch(n->kind()) {
case prim::Constant:
- case prim::NoneGenerator:
case prim::Undefined:
case prim::None:
return true;
IValue v = toIValue(node->output()).value();
printConstant(stmt, v);
} break;
- case prim::NoneGenerator:
case prim::Undefined:
case prim::None: {
if (node->output()->type()->isSubtypeOf(NoneType::get())) {
prim::ListConstruct,
prim::ListUnpack,
prim::None,
- prim::NoneGenerator,
prim::Print,
prim::PythonOp,
prim::TupleConstruct,
"aten::ceil(Tensor self) -> Tensor",
"aten::clone(Tensor self) -> Tensor",
"aten::contiguous(Tensor self) -> Tensor",
- "aten::bernoulli(Tensor self, *, Generator generator) -> Tensor",
+ "aten::bernoulli(Tensor self, *, Generator? generator) -> Tensor",
"aten::celu(Tensor self, Scalar alpha) -> Tensor",
"aten::clamp(Tensor self, Scalar? min, Scalar? max) -> Tensor",
"aten::clamp_max(Tensor self, Scalar max) -> Tensor",
"aten::clamp_min(Tensor self, Scalar min) -> Tensor",
"aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor",
- "aten::bernoulli(Tensor self, float p, *, Generator generator) -> Tensor",
+ "aten::bernoulli(Tensor self, float p, *, Generator? generator) -> Tensor",
"aten::cos(Tensor self) -> Tensor",
"aten::cosh(Tensor self) -> Tensor",
"aten::digamma(Tensor self) -> Tensor",
"aten::leaky_relu(Tensor self, Scalar negative_slope) -> Tensor",
"aten::lgamma(Tensor self) -> Tensor",
"aten::mvlgamma(Tensor self, int p) -> Tensor",
- "aten::normal(float mean, Tensor std, *, Generator generator) -> Tensor",
- "aten::normal(Tensor mean, float std, *, Generator generator) -> Tensor",
+ "aten::normal(float mean, Tensor std, *, Generator? generator) -> Tensor",
+ "aten::normal(Tensor mean, float std, *, Generator? generator) -> Tensor",
"aten::permute(Tensor self, int[] dims) -> Tensor",
"aten::pin_memory(Tensor self) -> Tensor",
"aten::pinverse(Tensor self, float rcond) -> Tensor",
"aten::reciprocal(Tensor self) -> Tensor",
"aten::relu(Tensor self) -> Tensor",
"aten::round(Tensor self) -> Tensor",
- "aten::rrelu(Tensor self, Scalar lower, Scalar upper, bool training, Generator generator) -> Tensor",
+ "aten::rrelu(Tensor self, Scalar lower, Scalar upper, bool training, Generator? generator) -> Tensor",
"aten::rsqrt(Tensor self) -> Tensor",
"aten::selu(Tensor self) -> Tensor",
"aten::sigmoid(Tensor self) -> Tensor",
// tensor outputs : 1
static const register_formula_for binary_ops_strict_match{
{
- "aten::normal(Tensor mean, Tensor std, *, Generator generator) -> Tensor",
+ "aten::normal(Tensor mean, Tensor std, *, Generator? generator) -> Tensor",
"aten::mm(Tensor self, Tensor mat2) -> Tensor",
"aten::bmm(Tensor self, Tensor mat2) -> Tensor",
},
};
}),
Operator(
- "prim::NoneGenerator() -> Generator",
- [](const Node* node) {
- return [](Stack& stack) {
- stack.emplace_back();
- return 0;
- };
- }),
- Operator(
prim::Print,
[](const Node* node) {
size_t num_inputs = node->inputs().size();
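(Why the prim::NoneGenerator operator above can be deleted: its lambda pushed a default-constructed IValue, i.e. None. With Generator formals now declared Generator?, the generic handling of a None constant leaves the same empty IValue on the stack; illustratively:

    Stack stack;
    stack.emplace_back();  // default IValue is None, which now matches Generator?
)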
}
if (value->type()->isSubtypeOf(NoneType::get()) && !concrete_type->isSubtypeOf(NoneType::get())){
- if (concrete_type->isSubtypeOf(GeneratorType::get())) {
- value = graph.insertNode(graph.createNoneGenerator())->output();
- } else if (concrete_type->isSubtypeOf(OptionalType::ofTensor())) {
+ if (concrete_type->isSubtypeOf(OptionalType::ofTensor())) {
// create an undefined tensor when None is passed to an Optional[Tensor] formal arg
value = graph.insertNode(graph.createUndefined())->output();
} else if (auto optional_type = concrete_type->cast<OptionalType>()) {
detail::badArgType(value);
}
Graph * g = n->owningGraph();
- Value * undef_gen = g->insertNode(g->createNoneGenerator())->output();
+ Value * undef_gen = g->insertNode(g->createNone(GeneratorType::get()))->output();
n->addInput(undef_gen);
}
void addInputs(Node *n, const char * name, at::Device value) {