}
/// Read the MME header of a general sparse matrix of type real.
-static void readMMEHeader(FILE *file, char *name, uint64_t *idata) {
+static void readMMEHeader(FILE *file, char *name, uint64_t *idata,
+ bool *is_symmetric) {
char line[1025];
char header[64];
char object[64];
fprintf(stderr, "Corrupt header in %s\n", name);
exit(1);
}
+ *is_symmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
// Make sure this is a general sparse matrix.
if (strcmp(toLower(header), "%%matrixmarket") ||
strcmp(toLower(object), "matrix") ||
strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
- strcmp(toLower(symmetry), "general")) {
+ (strcmp(toLower(symmetry), "general") && !(*is_symmetric))) {
fprintf(stderr,
"Cannot find a general sparse matrix with type real in %s\n", name);
exit(1);
}
// Perform some file format dependent set up.
uint64_t idata[512];
+ bool is_symmetric = false;
if (strstr(filename, ".mtx")) {
- readMMEHeader(file, filename, idata);
+ readMMEHeader(file, filename, idata, &is_symmetric);
} else if (strstr(filename, ".tns")) {
readExtFROSTTHeader(file, filename, idata);
} else {
exit(1);
}
tensor->add(indices, value);
+ // We currently choose to deal with symmetric matrices by fully constructing
+ // them. In the future, we may want to make symmetry implicit for storage
+ // reasons.
+ if (is_symmetric && indices[0] != indices[1])
+ tensor->add({indices[1], indices[0]}, value);
}
// Close the file and return tensor.
fclose(file);
# Copy test data over.
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/mttkrp_b.tns
${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.mtx
+ ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test_symmetric.mtx
${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.tns
${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/wide.mtx
${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/zero.mtx
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
-// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
-// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// Print the result for verification.
//
- // CHECK: 28.2
+ // CHECK: 30.2
//
%m = memref.buffer_cast %0 : memref<f64>
%v = memref.load %m[] : memref<f64>