auto& attribute = item.value();
// Add attribute to ModuleDef
torch::AttributeDef* attribute_def = module_def->add_attributes();
- attribute_def->set_name(attribute.name_);
- attribute_def->set_type(attribute.type->python_str());
+ attribute_def->set_name(attribute.name());
+ attribute_def->set_type(attribute.type()->python_str());
attribute_table_.push_back(*attribute.slot());
attribute_def->set_id(attribute_table_.size() - 1);
const script::NamedIValue& param,
torch::ParameterDef* param_def,
bool is_parameter) {
- param_def->set_name(param.name_);
+ param_def->set_name(param.name());
param_def->set_is_buffer(is_parameter);
param_def->set_tensor_id(addTensor(param.slot()->toTensor()));
}
return std::make_shared<SimpleValue>(m.get_or_add_parameter(v->slot()));
} else if (script::NamedIValue* v = module->find_attribute(field)) {
return std::make_shared<script::SimpleValue>(
- m.get_or_add_attribute(v->type, v->slot()));
+ m.get_or_add_attribute(v->type(), v->slot()));
} else if (Method* m = module->find_method(field)) {
return std::make_shared<MethodValue>(shared_from_this(), *m);
} else {
void createTensorToParameterNameMap(
const script::Module& module,
const QualifiedNamePtr& prefix,
- std::unordered_map<IValue*, QualifiedNamePtr>& result) {
+ std::unordered_map<script::Slot, QualifiedNamePtr>& result) {
for (const auto& elem : module.get_parameters()) {
const script::NamedIValue& param = elem.value();
- result[param.slot()] = QualifiedName::create(prefix, param.name_);
+ result[param.slot()] = QualifiedName::create(prefix, param.name());
}
for (const auto& elem : module.get_attributes()) {
const script::NamedIValue& param = elem.value();
- result[param.slot()] = QualifiedName::create(prefix, param.name_);
+ result[param.slot()] = QualifiedName::create(prefix, param.name());
}
for (const auto& elem : module.get_modules()) {
createTensorToParameterNameMap(
}
}
void printMethod(script::Method& method) {
- std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
+ std::unordered_map<script::Slot, QualifiedNamePtr> extra_ivalue_names;
createTensorToParameterNameMap(
method.owner(), QualifiedName::create("self"), extra_ivalue_names);
printMethod(method, /*is_class=*/false, extra_ivalue_names);
void printMethod(
script::Method& method,
bool is_class,
- const std::unordered_map<IValue*, QualifiedNamePtr>& extra_ivalue_names) {
+ const std::unordered_map<script::Slot, QualifiedNamePtr>& extra_ivalue_names) {
std::vector<std::string> ivalue_names = fmap(
method.initial_ivalues(),
- [&](IValue* slot) { return extra_ivalue_names.at(slot)->str(); });
+ [&](const script::Slot& slot) { return extra_ivalue_names.at(slot)->str(); });
const std::string& name = method.name();
Graph& graph = *method.graph();
auto defaults = fmap(
printFunction(graph, name, is_class, defaults, ivalue_names);
}
void printModule(script::Module& module) {
- std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
+ std::unordered_map<script::Slot, QualifiedNamePtr> extra_ivalue_names;
createTensorToParameterNameMap(
module, QualifiedName::create("self"), extra_ivalue_names);
for (auto& method : module.get_methods()) {
out << "class " << classType->name() << ":\n";
{
const auto guard = WithIndented();
- std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
+ std::unordered_map<script::Slot, QualifiedNamePtr> extra_ivalue_names;
for (auto& method : classType->methods()) {
printMethod(*method, /*is_class=*/true, extra_ivalue_names);
}
return std::make_shared<SimpleValue>(m.get_or_add_parameter(v->slot()));
} else if (NamedIValue* v = module->find_attribute(field)) {
return std::make_shared<SimpleValue>(
- m.get_or_add_attribute(v->type, v->slot()));
+ m.get_or_add_attribute(v->type(), v->slot()));
}
// This can also be a call to a non-script module, or a plain
}
static void gatherParametersAndBuffers(
- std::vector<IValue*>& values,
+ std::vector<Slot>& values,
const Module& m) {
for (auto& param : m.get_parameters()) {
values.push_back(param->slot());
}
for (auto& param : m.get_attributes()) {
- if (param->type->isSubtypeOf(TensorType::get())) {
+ if (param->type()->isSubtypeOf(TensorType::get())) {
values.push_back(param->slot());
}
}
py::tuple r(3);
IValue v = *buffer->slot();
result[i] = std::make_tuple(
- buffer.key(), buffer->type, toPyObject(std::move(v)));
+ buffer.key(), buffer->type(), toPyObject(std::move(v)));
}
return result;
})
bool force_outplace) {
// prereq: Module's buffers and parameters are unique
// this was ensured in python before calling this function
- std::vector<IValue*> parameters;
+ std::vector<Slot> parameters;
gatherParametersAndBuffers(parameters, *self);
Stack inputs = toStack(input_tuple);
- for (IValue* param : parameters) {
+ for (const Slot& param : parameters) {
inputs.emplace_back(*param);
}
auto graph = tracer::createGraphByTracing(
std::vector<std::tuple<std::shared_ptr<Module>, std::string>>
params,
std::shared_ptr<Module> orig) {
- std::vector<IValue*> member_inputs;
+ std::vector<Slot> member_inputs;
for (auto& p : params) {
NamedIValue* np = std::get<0>(p)->find_parameter(std::get<1>(p));
if (np == nullptr) {
.def(
"propagate_and_assign_input_and_output_shapes",
&Method::propagate_and_assign_input_and_output_shapes)
- .def("initial_ivalues", &Method::initial_ivalues)
+ .def("initial_ivalues",[](Method& m) {
+ std::vector<at::Tensor> tensors;
+ for (auto& t : m.initial_ivalues()) {
+ tensors.push_back(t->toTensor());
+ }
+ return tensors;
+ })
.def(
"graph_for",
[](py::args args, py::kwargs kwargs) {
#include <torch/csrc/jit/named_value.h>
#include <torch/csrc/jit/passes/shape_analysis.h>
#include <torch/csrc/jit/source_range.h>
+#include <torch/csrc/jit/script/slot.h>
+
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/api/include/torch/ordered_dict.h>
std::string name,
bool optimize,
std::shared_ptr<Graph> graph,
- std::vector<IValue*> initial_members,
+ std::vector<Slot> initial_members,
std::function<void(Method&)> method_creator)
: owner_(owner),
name_(std::move(name)),
size_t num_inputs() const {
return graph()->inputs().size() - initial_ivalues_.size();
}
- TORCH_API Value* get_or_add_parameter(IValue* slot) {
+ TORCH_API Value* get_or_add_parameter(Slot slot) {
AT_ASSERT(slot->isTensor());
return get_or_add_attribute(TensorType::get(), slot);
}
- TORCH_API Value* get_or_add_attribute(TypePtr type, IValue* slot) {
+ TORCH_API Value* get_or_add_attribute(TypePtr type, Slot slot) {
auto it = initial_ivalue_index.find(slot);
if (it != initial_ivalue_index.end()) {
return graph()->inputs().at(it->second);
for (at::Tensor& i : inputs) {
stack.emplace_back(std::move(i));
}
- for (IValue* inp : initial_ivalues_) {
+ for (const Slot& inp : initial_ivalues_) {
stack.push_back(*inp);
}
const auto size = stack.size();
return retval;
}
- const std::vector<IValue*>& initial_ivalues() const {
+ const std::vector<Slot>& initial_ivalues() const {
return initial_ivalues_;
}
- // each is a pointer to a slot in the module that owns this parameter
+ // each is a Slot in the module that owns this parameter
  // parameters and submodules can only be _added_ to script Modules to ensure
- // these pointers always stay valid
+ // these slots always stay valid
- std::vector<IValue*> initial_ivalues_;
+ std::vector<Slot> initial_ivalues_;
- // map from a IValue* in initial_ivalues to the offset it appears at
+ // map from a Slot in initial_ivalues to the offset it appears at
// in graph. used to accelerate get_or_add_parameter
- std::unordered_map<IValue*, size_t> initial_ivalue_index;
+ std::unordered_map<Slot, size_t> initial_ivalue_index;
// TODO: support that case where we allow _writes_ to parameters from
// compiled functions.
struct NamedIValue {
NamedIValue(std::string name, TypePtr type, IValue ivalue)
: name_(name),
- type(type),
- ivalue(torch::make_unique<IValue>(std::move(ivalue))) {}
+ type_(type),
+ ivalue_(torch::make_unique<IValue>(std::move(ivalue))) {}
- IValue* slot() const {
- return ivalue.get();
+ Slot slot() const {
+ return Slot(ivalue_.get());
+ }
+ const std::string& name() const {
+ return name_;
+ }
+ const TypePtr& type() const {
+ return type_;
}
+private:
const std::string name_;
- const TypePtr type;
- std::unique_ptr<IValue> ivalue;
+ const TypePtr type_;
+ std::unique_ptr<IValue> ivalue_;
};
struct Module {
void register_buffer(const std::string& name, autograd::Variable v) {
if (auto b = attributes.find(name)) {
- AT_ASSERT(b->type->isSubtypeOf(TensorType::get()));
+ AT_ASSERT(b->type()->isSubtypeOf(TensorType::get()));
*b->slot() = v;
return;
}
Method& create_method(
const std::string& name,
std::shared_ptr<Graph> graph,
- std::vector<IValue*> member_inputs) {
+ std::vector<Slot> member_inputs) {
AT_ASSERT(graph);
std::unique_ptr<Method> method(new Method(
this,
return *methods.insert(name, std::move(method));
}
- IValue* parameter_slot(const std::string& name) const {
+ Slot parameter_slot(const std::string& name) const {
return parameters[name].slot();
}
}
NamedIValue* find_buffer(const std::string& name) {
auto b = attributes.find(name);
- if (b && b->type->isSubtypeOf(TensorType::get())) {
+ if (b && b->type()->isSubtypeOf(TensorType::get())) {
return b;
}
return nullptr;
ModuleLookup module_lookup,
// parameter_remap is needed when a parent module uses a parameter of a
// submodule
- std::unordered_map<IValue*, IValue*>& parameter_remap,
+ std::unordered_map<Slot, Slot>& parameter_remap,
std::vector<std::string> names = {}) const {
auto curr = module_lookup(names);
for (auto& kv : parameters) {
parameter_remap[kv.value().slot()] = curr->parameter_slot(kv.key());
}
for (auto& kv : attributes) {
- if (!kv.value().type->isSubtypeOf(TensorType::get())) {
+ if (!kv.value().type()->isSubtypeOf(TensorType::get())) {
continue;
}
curr->register_buffer(
names.pop_back();
}
for (auto& kv : methods) {
- std::vector<IValue*> initial_ivalues;
+ std::vector<Slot> initial_ivalues;
for (auto& p : kv.value()->initial_ivalues()) {
initial_ivalues.push_back(parameter_remap.at(p));
}
--- /dev/null
+#pragma once
+#include <ATen/core/ivalue.h>
+
+namespace torch {
+namespace jit {
+namespace script {
+
+// a stable location that can hold an IValue.
+// Currently this is internally implemented as a pointer, but when
+// modules become first-class this will be a pair of <module_ivalue, slot_number>
+struct Slot {
+ friend struct NamedIValue;
+ Slot()
+ : slot_(nullptr) {}
+ Slot(at::IValue* slot)
+ : slot_(slot) {}
+ at::IValue& operator*() const {
+ return *slot_;
+ }
+ at::IValue* operator->() const {
+ return slot_;
+ }
+ bool operator==(const Slot& rhs) const {
+ return slot_ == rhs.slot_;
+ }
+private:
+ at::IValue* slot_;
+ friend struct std::hash<Slot>;
+};
+
+}}}
+
+// slots are hashable because they are often used as keys in maps
+// for remapping uses of a slot from one module to another
+namespace std {
+ template <>
+ struct hash<torch::jit::script::Slot> {
+ size_t operator()(const torch::jit::script::Slot& s) const noexcept {
+ return std::hash<at::IValue*>{}(s.slot_);
+ }
+ };
+} // namespace std