"-DUSE_METAL=ON"
..
- - name: Build
+ - name: Build@Win
+ if: matrix.os == 'windows-latest'
+ run: cmake --build build.common --config Release -- /m
+
+ - name: Build@MacOS
+ if: matrix.os == 'macOS-latest'
run: cmake --build build.common --config Release -j3
#ifndef COMPILER_RT_BUILTIN_FP16_H_
#define COMPILER_RT_BUILTIN_FP16_H_
+#ifdef _MSC_VER
+#pragma warning(disable : 4305 4805)
+#endif
+
#include <cstdint>
static inline uint32_t __clz(uint32_t x) {
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endif()
+ # Disable common MSVC warnings
+ # Integer conversion warnings (e.g. int64 to int)
+ add_compile_options(/wd4244)
+ add_compile_options(/wd4267)
+ # Signed/unsigned constant comparison
+ add_compile_options(/wd4018)
+ # Aligned-alloc requirement may not be met (needs C++17)
+ add_compile_options(/wd4316)
+ # Unreferenced local variables (usually in exception catch blocks)
+ add_compile_options(/wd4101)
+ # 'always inline' keyword is not necessary
+ add_compile_options(/wd4180)
+ # DLL interface warning in C++
+ add_compile_options(/wd4251)
else(MSVC)
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "Build in Debug mode")
void WindowsShared(const std::string& output, const std::vector<std::string>& files,
const std::string& options = "", const std::string& cc = "clang") {
std::string cmd = cc;
- cmd += " -O2 -flto=full -fuse-ld=lld-link -Wl,/EXPORT:__tvm_main__ -shared ";
+ cmd += " -O2 -flto=full -fuse-ld=lld-link -shared ";
cmd += " -o " + output;
for (const auto& file : files) {
cmd += " " + file;
*/
static ObjectPtr<DenseMapNode> Empty(uint32_t fib_shift, uint64_t n_slots) {
CHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize));
- CHECK_EQ((n_slots & -n_slots), n_slots);
ObjectPtr<DenseMapNode> p = make_object<DenseMapNode>();
uint64_t n_blocks = CalcNumBlocks(n_slots - 1);
Block* block = p->data_ = new Block[n_blocks];
IterAdapter operator+(difference_type offset) const { return IterAdapter(iter_ + offset); }
+ IterAdapter operator-(difference_type offset) const { return IterAdapter(iter_ - offset); }
+
template <typename T = IterAdapter>
typename std::enable_if<std::is_same<iterator_category, std::random_access_iterator_tag>::value,
typename T::difference_type>::type inline
*
* \endcode
*/
-class Object {
+class TVM_DLL Object {
public:
/*!
* \brief Object deleter
* \param tindex The type index.
* \return the result.
*/
- TVM_DLL static std::string TypeIndex2Key(uint32_t tindex);
+ static std::string TypeIndex2Key(uint32_t tindex);
/*!
* \brief Get the type key hash of the corresponding index from runtime.
* \param tindex The type index.
* \return the related key-hash.
*/
- TVM_DLL static size_t TypeIndex2KeyHash(uint32_t tindex);
+ static size_t TypeIndex2KeyHash(uint32_t tindex);
/*!
* \brief Get the type index of the corresponding key from runtime.
* \param key The type key.
* \return the result.
*/
- TVM_DLL static uint32_t TypeKey2Index(const std::string& key);
+ static uint32_t TypeKey2Index(const std::string& key);
#if TVM_OBJECT_ATOMIC_REF_COUNTER
using RefCounterType = std::atomic<int32_t>;
* \param type_child_slots_can_overflow Whether to allow child to overflow the slots.
* \return The allocated type index.
*/
- TVM_DLL static uint32_t GetOrAllocRuntimeTypeIndex(const std::string& key, uint32_t static_tindex,
- uint32_t parent_tindex,
- uint32_t type_child_slots,
- bool type_child_slots_can_overflow);
+ static uint32_t GetOrAllocRuntimeTypeIndex(const std::string& key, uint32_t static_tindex,
+ uint32_t parent_tindex, uint32_t type_child_slots,
+ bool type_child_slots_can_overflow);
// reference counter related operations
/*! \brief developer function, increases reference counter. */
* \param parent_tindex The parent type index.
* \return The derivation results.
*/
- TVM_DLL bool DerivedFrom(uint32_t parent_tindex) const;
+ bool DerivedFrom(uint32_t parent_tindex) const;
// friend classes
template <typename>
friend class ObjAllocatorBase;
/*!
* \brief Base class of all operation nodes
*/
-class OperationNode : public Object {
+class TVM_DLL OperationNode : public Object {
public:
/*! \brief optional name of the operation */
std::string name;
TVM_DECLARE_FINAL_OBJECT_INFO(BijectiveLayoutNode, Object);
};
-/*! \brief Bijective function mapping for data layout transformation.
+/*!
+ * \brief Bijective function mapping for data layout transformation.
* Given two Layout, BijectiveLayout build and store the mapping rules,
- * provides API to transform N-dimention tensor from the source indices (i0, i1, …, im)
- * to the destination indices (j0, j1, … jm).
+ * provides API to transform N-dimension tensor from the source indices (i0, i1, .., im)
+ * to the destination indices (j0, j1, .., jm).
*/
class BijectiveLayout : public ObjectRef {
public:
if obj.endswith(".o"):
link_cmd += [obj]
- link_cmd += ["-EXPORT:__tvm_main__"]
link_cmd += [temp_path + "dllmain.obj"]
link_cmd += ["-out:" + output]
Entry b = VisitExpr(op->args[1]);
// a c x / c -> a x
if (b.is_const()) {
- return DivByConst(op->args[0], 1 << b.base, true);
+ return DivByConst(op->args[0], static_cast<int64_t>(1) << b.base, true);
}
return Everything();
}
}
return it->second;
}
- template <typename T>
- void ParseValue(const char* key, T* value) const {
+
+ void ParseDouble(const char* key, double* value) const {
std::istringstream is(GetValue(key));
if (is.str() == "inf") {
- *value = std::numeric_limits<T>::infinity();
+ *value = std::numeric_limits<double>::infinity();
} else if (is.str() == "-inf") {
- *value = -std::numeric_limits<T>::infinity();
+ *value = -std::numeric_limits<double>::infinity();
} else {
is >> *value;
if (is.fail()) {
}
}
}
- void Visit(const char* key, double* value) final { ParseValue(key, value); }
+
+ template <typename T>
+ void ParseValue(const char* key, T* value) const {
+ std::istringstream is(GetValue(key));
+ is >> *value;
+ if (is.fail()) {
+ LOG(FATAL) << "Wrong value format for field " << key;
+ }
+ }
+ void Visit(const char* key, double* value) final { ParseDouble(key, value); }
void Visit(const char* key, int64_t* value) final { ParseValue(key, value); }
void Visit(const char* key, uint64_t* value) final { ParseValue(key, value); }
void Visit(const char* key, int* value) final { ParseValue(key, value); }
// Current state;
State state_;
// Initialize remote header
- bool init_header_step_{0};
+ int init_header_step_{0};
// Whether current handler is client or server mode.
bool client_mode_{false};
// Whether current handler is in the async server mode.
global->setAlignment(1);
#endif
global->setInitializer(llvm::ConstantDataArray::getString(*ctx_, entry_func_name));
+ global->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
}
std::unique_ptr<llvm::Module> CodeGenCPU::Finish() {
*/
#ifndef TVM_TARGET_LLVM_LLVM_COMMON_H_
#define TVM_TARGET_LLVM_LLVM_COMMON_H_
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4141 4291)
+#endif
+
#ifdef TVM_LLVM_VERSION
#include <llvm/Analysis/TargetTransformInfo.h>
// No reverse dependencies means that the output does not depend on this tensor,
// return a zero tensor of the appropriate shape
// (i.e., output shape + tensor shape, aka shape of Jacobian)
- Array<PrimExpr> result_shape(head->shape.begin(),
- head->shape.end() + (-output->shape.size()));
+ Array<PrimExpr> result_shape(head->shape.begin(), head->shape.end() - output->shape.size());
for (auto e : tensor->shape) {
result_shape.push_back(e);
}