From: lcskrishna
Date: Thu, 6 Dec 2018 07:52:42 +0000 (-0800)
Subject: Fixed MIOpen RNN Segfault issue and enabled RNN test (#14810)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2426
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=12addc64a6a6787c224e4d96057a797ca7de1535;p=platform%2Fupstream%2Fpytorch.git

Fixed MIOpen RNN Segfault issue and enabled RNN test (#14810)

Summary:
This pull request contains the following changes:
1. Added the MIOpen RNN APIs miopenGetRNNLayerBiasSize and miopenGetRNNLayerParamSize.
2. Fixed the usage of the miopenGetRNNLayerParam API.
3. Modified the RNN test to run using the MIOpen engine.

Differential Revision: D13355699

Pulled By: bddppq

fbshipit-source-id: 6f750657f8049c5446eca893880b397804120b69
---

diff --git a/caffe2/operators/rnn/hip/recurrent_op_miopen.hip b/caffe2/operators/rnn/hip/recurrent_op_miopen.hip
index 34c96a2..5900a08 100644
--- a/caffe2/operators/rnn/hip/recurrent_op_miopen.hip
+++ b/caffe2/operators/rnn/hip/recurrent_op_miopen.hip
@@ -379,8 +379,19 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
     miopenTensorDescriptor_t biasDesc;
     MIOPEN_ENFORCE(miopenCreateTensorDescriptor(&biasDesc));
-    void* bias;
+    size_t bias_size = 0;
+    MIOPEN_ENFORCE(miopenGetRNNLayerBiasSize(
+        miopen_wrapper_.inline_miopen_handle(),
+        rnnDesc_,
+        layer,
+        param_id,
+        &bias_size));
+
+    void* bias;
+    miopen_wrapper_.with_miopen_state(0, [&](MIOPENState* state) {
+      bias = state->workspace().get(bias_size);
+    });
     MIOPEN_ENFORCE(miopenGetRNNLayerBias(
         miopen_wrapper_.inline_miopen_handle(),
         rnnDesc_,
@@ -390,10 +401,9 @@
         Input(1).template data<T>(),
         param_id,
         biasDesc,
-        &bias));
-    int numBiasDims;
-    std::vector<int> biasDims;
-    std::vector<int> strideDims;
+        bias));
+    std::array<int, 3> biasDims {1,1,1};
+    std::array<int, 3> strideDims {1,1,1};
     miopenDataType_t dt;
     MIOPEN_ENFORCE(miopenGetTensorDescriptor(
@@ -419,7 +429,20 @@ bool RecurrentParamAccessOp<T, mode>::RunOnDevice() {
         weight_constants[param_type] + 4 * (input_type == "recurrent");
     miopenTensorDescriptor_t matrixParamDesc;
     MIOPEN_ENFORCE(miopenCreateTensorDescriptor(&matrixParamDesc));
+
+    size_t param_size = 0;
+    MIOPEN_ENFORCE(miopenGetRNNLayerParamSize(
+        miopen_wrapper_.inline_miopen_handle(),
+        rnnDesc_,
+        layer,
+        xDesc_->descs()[0],
+        param_id,
+        &param_size));
+
     void* pmatrix;
+    miopen_wrapper_.with_miopen_state(0, [&](MIOPENState* state) {
+      pmatrix = state->workspace().get(param_size);
+    });
     MIOPEN_ENFORCE(miopenGetRNNLayerParam(
         miopen_wrapper_.inline_miopen_handle(),
         rnnDesc_,
@@ -429,15 +452,14 @@
         Input(1).template data<T>(),
         param_id,
         matrixParamDesc,
-        &pmatrix));
-    int numDims;
-    std::vector<int> matDims;
-    std::vector<int> strideDims;
+        pmatrix));
+    std::array<int, 3> matDims {1,1,1};
+    std::array<int, 3> strideDims {1,1,1};
     miopenDataType_t dt;
     MIOPEN_ENFORCE(miopenGetTensorDescriptor(
         matrixParamDesc, &dt, matDims.data(), strideDims.data()));
-    CAFFE_ENFORCE_EQ(numDims, 3);
+    CAFFE_ENFORCE_EQ(matDims.size(), 3);
     if (mode == SET_PARAM) {
       CAFFE_ENFORCE_EQ(matDims[0] * matDims[1] * matDims[2], Input(2).size());
       context_.template CopySameDevice<T>(
diff --git a/caffe2/python/operator_test/cudnn_recurrent_test.py b/caffe2/python/operator_test/cudnn_recurrent_test.py
index 40fe88b..46162f6 100644
--- a/caffe2/python/operator_test/cudnn_recurrent_test.py
+++ b/caffe2/python/operator_test/cudnn_recurrent_test.py
@@ -11,11 +11,11 @@ import numpy as np
 import unittest


-@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
+@unittest.skipIf((not workspace.has_gpu_support) and
+                 (not workspace.has_hip_support), "No gpu support.")
 class TestLSTMs(unittest.TestCase):

     def testEqualToCudnn(self):
-        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA)):
+        with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType)):
             T = 8
             batch_size = 4
             input_dim = 8
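
The segfault came from passing the address of an uninitialized local pointer (&bias / &pmatrix) as the output buffer of miopenGetRNNLayerBias / miopenGetRNNLayerParam: MIOpen copies the layer data into that buffer, so it must point at real device memory of the right size. Below is a minimal standalone sketch of the query-size-then-copy sequence the patch adopts, assuming the RNN handle, descriptors, and packed weight blob (handle, rnnDesc, xDesc, wDesc, w) are already set up; it uses a plain hipMalloc where the Caffe2 operator instead borrows scratch space from its MIOPENState workspace.

// Hedged sketch: fetch one RNN layer bias with MIOpen by first asking for its
// byte size, then handing MIOpen a real device buffer of that size.
// handle/rnnDesc/xDesc/wDesc/w are assumed to be created elsewhere.
#include <hip/hip_runtime.h>
#include <miopen/miopen.h>
#include <cstddef>
#include <stdexcept>

#define CHECK_MIOPEN(expr)                                     \
  do {                                                         \
    miopenStatus_t _st = (expr);                               \
    if (_st != miopenStatusSuccess)                            \
      throw std::runtime_error("MIOpen call failed: " #expr);  \
  } while (0)

void* GetLayerBias(
    miopenHandle_t handle,
    miopenRNNDescriptor_t rnnDesc,
    miopenTensorDescriptor_t xDesc,  // descriptor of one input time step
    miopenTensorDescriptor_t wDesc,  // descriptor of the packed weight blob
    const void* w,                   // device pointer to the packed weights
    int layer,
    int bias_id) {
  // 1. Ask MIOpen how many bytes this layer's bias occupies.
  size_t bias_bytes = 0;
  CHECK_MIOPEN(miopenGetRNNLayerBiasSize(
      handle, rnnDesc, layer, bias_id, &bias_bytes));

  // 2. Provide a device buffer of exactly that size. Passing the address of
  //    an uninitialized pointer here (the pre-fix behavior) makes MIOpen
  //    write through a garbage pointer and crash.
  void* bias = nullptr;
  if (hipMalloc(&bias, bias_bytes) != hipSuccess)
    throw std::runtime_error("hipMalloc failed");

  // 3. Let MIOpen fill the buffer and describe the bias shape.
  miopenTensorDescriptor_t biasDesc;
  CHECK_MIOPEN(miopenCreateTensorDescriptor(&biasDesc));
  CHECK_MIOPEN(miopenGetRNNLayerBias(
      handle, rnnDesc, layer, xDesc, wDesc, w, bias_id, biasDesc, bias));
  CHECK_MIOPEN(miopenDestroyTensorDescriptor(biasDesc));

  return bias;  // caller owns the buffer and releases it with hipFree
}

Reusing the MIOPENState workspace, as the patch does, avoids a fresh device allocation on every parameter access; the hipMalloc above is only there to keep the sketch self-contained.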