layers: Add threading layer generation
author    Mike Stroyan <mike@LunarG.com>
          Mon, 2 Nov 2015 22:30:20 +0000 (15:30 -0700)
committer Mike Stroyan <stroyan@google.com>
          Fri, 5 Feb 2016 18:04:48 +0000 (11:04 -0700)
Generate threading layer wrappers in thread_check.h.
Change path for generated vulkan.h.

generator.py
genvk.py
layers/CMakeLists.txt
layers/threading.cpp [new file with mode: 0644]
layers/threading.h

index b572986..740f66e 100644 (file)
--- a/generator.py
+++ b/generator.py
@@ -252,6 +252,77 @@ class DocGeneratorOptions(GeneratorOptions):
         self.alignFuncParam  = alignFuncParam
         self.expandEnumerants = expandEnumerants
 
+# ThreadGeneratorOptions - subclass of GeneratorOptions.
+#
+# Adds options used by ThreadOutputGenerator objects during thread checking
+# header generation.
+#
+# Additional members
+#   prefixText - list of strings to prefix generated header with
+#     (usually a copyright statement + calling convention macros).
+#   protectFile - True if multiple inclusion protection should be
+#     generated (based on the filename) around the entire header.
+#   protectFeature - True if #ifndef..#endif protection should be
+#     generated around a feature interface in the header file.
+#   genFuncPointers - True if function pointer typedefs should be
+#     generated
+#   protectProto - True if #ifdef..#endif protection should be
+#     generated around prototype declarations
+#   protectProtoStr - #ifdef symbol to use around prototype
+#     declarations, if protected
+#   apicall - string to use for the function declaration prefix,
+#     such as APICALL on Windows.
+#   apientry - string to use for the calling convention macro,
+#     in typedefs, such as APIENTRY.
+#   apientryp - string to use for the calling convention macro
+#     in function pointer typedefs, such as APIENTRYP.
+#   indentFuncProto - True if prototype declarations should put each
+#     parameter on a separate line
+#   indentFuncPointer - True if typedefed function pointers should put each
+#     parameter on a separate line
+#   alignFuncParam - if nonzero and parameters are being put on a
+#     separate line, align parameter names at the specified column
+class ThreadGeneratorOptions(GeneratorOptions):
+    """Represents options during C interface generation for headers"""
+    def __init__(self,
+                 filename = None,
+                 apiname = None,
+                 profile = None,
+                 versions = '.*',
+                 emitversions = '.*',
+                 defaultExtensions = None,
+                 addExtensions = None,
+                 removeExtensions = None,
+                 sortProcedure = regSortFeatures,
+                 prefixText = "",
+                 genFuncPointers = True,
+                 protectFile = True,
+                 protectFeature = True,
+                 protectProto = True,
+                 protectProtoStr = True,
+                 apicall = '',
+                 apientry = '',
+                 apientryp = '',
+                 indentFuncProto = True,
+                 indentFuncPointer = False,
+                 alignFuncParam = 0):
+        GeneratorOptions.__init__(self, filename, apiname, profile,
+                                  versions, emitversions, defaultExtensions,
+                                  addExtensions, removeExtensions, sortProcedure)
+        self.prefixText      = prefixText
+        self.genFuncPointers = genFuncPointers
+        self.protectFile     = protectFile
+        self.protectFeature  = protectFeature
+        self.protectProto    = protectProto
+        self.protectProtoStr = protectProtoStr
+        self.apicall         = apicall
+        self.apientry        = apientry
+        self.apientryp       = apientryp
+        self.indentFuncProto = indentFuncProto
+        self.indentFuncPointer = indentFuncPointer
+        self.alignFuncParam  = alignFuncParam
+
+
 # OutputGenerator - base class for generating API interfaces.
 # Manages basic logic, logging, and output file control
 # Derived classes actually generate formatted output.
@@ -2182,3 +2253,353 @@ class HostSynchronizationOutputGenerator(OutputGenerator):
         self.makeThreadSafetyBlocks(cmdinfo.elem, 'param')
 
         self.writeInclude()
+
+# ThreadOutputGenerator - subclass of OutputGenerator.
+# Generates Thread checking framework
+#
+# ---- methods ----
+# ThreadOutputGenerator(errFile, warnFile, diagFile) - args as for
+#   OutputGenerator. Defines additional internal state.
+# ---- methods overriding base class ----
+# beginFile(genOpts)
+# endFile()
+# beginFeature(interface, emit)
+# endFeature()
+# genType(typeinfo,name)
+# genStruct(typeinfo,name)
+# genGroup(groupinfo,name)
+# genEnum(enuminfo, name)
+# genCmd(cmdinfo, name)
+class ThreadOutputGenerator(OutputGenerator):
+    """Generate specified API interfaces in a specific style, such as a C header"""
+    # This is an ordered list of sections in the header file.
+    TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
+                     'group', 'bitmask', 'funcpointer', 'struct']
+    ALL_SECTIONS = TYPE_SECTIONS + ['command']
+    def __init__(self,
+                 errFile = sys.stderr,
+                 warnFile = sys.stderr,
+                 diagFile = sys.stdout):
+        OutputGenerator.__init__(self, errFile, warnFile, diagFile)
+        # Internal state - accumulators for different inner block text
+        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
+        self.intercepts = []
+
+    # Check if the parameter passed in is a pointer to an array
+    def paramIsArray(self, param):
+        return param.attrib.get('len') is not None
+
+    # Check if the parameter passed in is a pointer
+    def paramIsPointer(self, param):
+        ispointer = False
+        for elem in param:
+            #write('paramIsPointer '+elem.text, file=sys.stderr)
+            #write('elem.tag '+elem.tag, file=sys.stderr)
+            #if (elem.tail is None):
+            #    write('elem.tail is None', file=sys.stderr)
+            #else:
+            #    write('elem.tail '+elem.tail, file=sys.stderr)
+            if ((elem.tag != 'type') and (elem.tail is not None)) and '*' in elem.tail:
+                ispointer = True
+            #    write('is pointer', file=sys.stderr)
+        return ispointer
+    def makeThreadUseBlock(self, cmd, functionprefix):
+        """Generate C function pointer typedef for <command> Element"""
+        paramdecl = ''
+        thread_check_dispatchable_objects = [
+            "VkCommandBuffer",
+            "VkDevice",
+            "VkInstance",
+            "VkQueue",
+        ]
+        thread_check_nondispatchable_objects = [
+            "VkBuffer",
+            "VkBufferView",
+            "VkCommandPool",
+            "VkDescriptorPool",
+            "VkDescriptorSetLayout",
+            "VkDeviceMemory",
+            "VkEvent",
+            "VkFence",
+            "VkFramebuffer",
+            "VkImage",
+            "VkImageView",
+            "VkPipeline",
+            "VkPipelineCache",
+            "VkPipelineLayout",
+            "VkQueryPool",
+            "VkRenderPass",
+            "VkSampler",
+            "VkSemaphore",
+            "VkShaderModule",
+        ]
+
+        # Find and add any parameters that are thread unsafe
+        params = cmd.findall('param')
+        for param in params:
+            paramname = param.find('name')
+            if False: # self.paramIsPointer(param):
+                paramdecl += '    // not watching use of pointer ' + paramname.text + '\n'
+            else:
+                externsync = param.attrib.get('externsync')
+                if externsync == 'true':
+                    if self.paramIsArray(param):
+                        paramdecl += '    for (int index=0;index<' + param.attrib.get('len') + ';index++) {\n'
+                        paramdecl += '        ' + functionprefix + 'WriteObject(my_data, ' + paramname.text + '[index]);\n'
+                        paramdecl += '    }\n'
+                    else:
+                        paramdecl += '    ' + functionprefix + 'WriteObject(my_data, ' + paramname.text + ');\n'
+                elif (param.attrib.get('externsync')):
+                    if self.paramIsArray(param):
+                        # Externsync can list pointers to arrays of members to synchronize
+                        paramdecl += '    for (int index=0;index<' + param.attrib.get('len') + ';index++) {\n'
+                        for member in externsync.split(","):
+                            # Replace first empty [] in member name with index
+                            element = member.replace('[]','[index]',1)
+                            if '[]' in element:
+                                # Replace any second empty [] in element name with
+                                # inner array index based on mapping array names like
+                                # "pSomeThings[]" to "someThingCount" array size.
+                                # This could be more robust by mapping a param member
+                                # name to a struct type and "len" attribute.
+                                limit = element[0:element.find('s[]')] + 'Count'
+                                dotp = limit.rfind('.p')
+                                limit = limit[0:dotp+1] + limit[dotp+2:dotp+3].lower() + limit[dotp+3:]
+                                paramdecl += '        for(int index2=0;index2<'+limit+';index2++)'
+                                element = element.replace('[]','[index2]')
+                            paramdecl += '        ' + functionprefix + 'WriteObject(my_data, ' + element + ');\n'
+                        paramdecl += '    }\n'
+                    else:
+                        # externsync can list members to synchronize
+                        for member in externsync.split(","):
+                            paramdecl += '    ' + functionprefix + 'WriteObject(my_data, ' + member + ');\n'
+                else:
+                    paramtype = param.find('type')
+                    if paramtype is not None:
+                        paramtype = paramtype.text
+                    else:
+                        paramtype = 'None'
+                    if paramtype in thread_check_dispatchable_objects or paramtype in thread_check_nondispatchable_objects:
+                        if self.paramIsArray(param) and ('pPipelines' != paramname.text):
+                            paramdecl += '    for (int index=0;index<' + param.attrib.get('len') + ';index++) {\n'
+                            paramdecl += '        ' + functionprefix + 'ReadObject(my_data, ' + paramname.text + '[index]);\n'
+                            paramdecl += '    }\n'
+                        elif not self.paramIsPointer(param):
+                            # Pointer params are often being created.
+                            # They are not being read from.
+                            paramdecl += '    ' + functionprefix + 'ReadObject(my_data, ' + paramname.text + ');\n'
+        explicitexternsyncparams = cmd.findall("param[@externsync]")
+        if (explicitexternsyncparams is not None):
+            for param in explicitexternsyncparams:
+                externsyncattrib = param.attrib.get('externsync')
+                paramname = param.find('name')
+                paramdecl += '// Host access to '
+                if externsyncattrib == 'true':
+                    if self.paramIsArray(param):
+                        paramdecl += 'each member of ' + paramname.text
+                    elif self.paramIsPointer(param):
+                        paramdecl += 'the object referenced by ' + paramname.text
+                    else:
+                        paramdecl += paramname.text
+                else:
+                    paramdecl += externsyncattrib
+                paramdecl += ' must be externally synchronized\n'
+
+        # Find and add any "implicit" parameters that are thread unsafe
+        implicitexternsyncparams = cmd.find('implicitexternsyncparams')
+        if (implicitexternsyncparams is not None):
+            for elem in implicitexternsyncparams:
+                paramdecl += '    // '
+                paramdecl += elem.text
+                paramdecl += ' must be externally synchronized between host accesses\n'
+
+        if (paramdecl == ''):
+            return None
+        else:
+            return paramdecl
+    def beginFile(self, genOpts):
+        OutputGenerator.beginFile(self, genOpts)
+        # C-specific
+        #
+        # Multiple inclusion protection & C++ wrappers.
+        if (genOpts.protectFile and self.genOpts.filename):
+            headerSym = '__' + re.sub('\.h', '_h_', os.path.basename(self.genOpts.filename))
+            write('#ifndef', headerSym, file=self.outFile)
+            write('#define', headerSym, '1', file=self.outFile)
+            self.newline()
+        write('#ifdef __cplusplus', file=self.outFile)
+        write('extern "C" {', file=self.outFile)
+        write('#endif', file=self.outFile)
+        self.newline()
+        #
+        # User-supplied prefix text, if any (list of strings)
+        if (genOpts.prefixText):
+            for s in genOpts.prefixText:
+                write(s, file=self.outFile)
+    def endFile(self):
+        # C-specific
+        # Finish C++ wrapper and multiple inclusion protection
+        self.newline()
+        # record intercepted procedures
+        write('// intercepts', file=self.outFile)
+        write('struct { const char* name; PFN_vkVoidFunction pFunc;} procmap[] = {', file=self.outFile)
+        write('\n'.join(self.intercepts), file=self.outFile)
+        write('};\n', file=self.outFile)
+        self.newline()
+        write('#ifdef __cplusplus', file=self.outFile)
+        write('}', file=self.outFile)
+        write('#endif', file=self.outFile)
+        if (self.genOpts.protectFile and self.genOpts.filename):
+            self.newline()
+            write('#endif', file=self.outFile)
+        # Finish processing in superclass
+        OutputGenerator.endFile(self)
+    def beginFeature(self, interface, emit):
+        #write('// starting beginFeature', file=self.outFile)
+        # Start processing in superclass
+        OutputGenerator.beginFeature(self, interface, emit)
+        # C-specific
+        # Accumulate includes, defines, types, enums, function pointer typedefs,
+        # and function prototypes separately for this feature. They're only
+        # printed in endFeature().
+        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
+        #write('// ending beginFeature', file=self.outFile)
+    def endFeature(self):
+        # C-specific
+        # Actually write the interface to the output file.
+        #write('// starting endFeature', file=self.outFile)
+        if (self.emit):
+            self.newline()
+            if (self.genOpts.protectFeature):
+                write('#ifndef', self.featureName, file=self.outFile)
+            # If type declarations are needed by other features based on
+            # this one, it may be necessary to suppress the ExtraProtect,
+            # or move it below the 'for section...' loop.
+            #write('// endFeature looking at self.featureExtraProtect', file=self.outFile)
+            if (self.featureExtraProtect != None):
+                write('#ifdef', self.featureExtraProtect, file=self.outFile)
+            #write('#define', self.featureName, '1', file=self.outFile)
+            for section in self.TYPE_SECTIONS:
+                #write('// endFeature writing section'+section, file=self.outFile)
+                contents = self.sections[section]
+                if contents:
+                    write('\n'.join(contents), file=self.outFile)
+                    self.newline()
+            #write('// endFeature looking at self.sections[command]', file=self.outFile)
+            if (self.sections['command']):
+                write('\n'.join(self.sections['command']), end='', file=self.outFile)
+                self.newline()
+            if (self.featureExtraProtect != None):
+                write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile)
+            if (self.genOpts.protectFeature):
+                write('#endif /*', self.featureName, '*/', file=self.outFile)
+        # Finish processing in superclass
+        OutputGenerator.endFeature(self)
+        #write('// ending endFeature', file=self.outFile)
+    #
+    # Append a definition to the specified section
+    def appendSection(self, section, text):
+        # self.sections[section].append('SECTION: ' + section + '\n')
+        self.sections[section].append(text)
+    #
+    # Type generation
+    def genType(self, typeinfo, name):
+        pass
+    #
+    # Struct (e.g. C "struct" type) generation.
+    # This is a special case of the <type> tag where the contents are
+    # interpreted as a set of <member> tags instead of freeform C
+    # type declarations. The <member> tags are just like <param>
+    # tags - they are a declaration of a struct or union member.
+    # Only simple member declarations are supported (no nested
+    # structs etc.)
+    def genStruct(self, typeinfo, typeName):
+        OutputGenerator.genStruct(self, typeinfo, typeName)
+        body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
+        # paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
+        for member in typeinfo.elem.findall('.//member'):
+            body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
+            body += ';\n'
+        body += '} ' + typeName + ';\n'
+        self.appendSection('struct', body)
+    #
+    # Group (e.g. C "enum" type) generation.
+    # These are concatenated together with other types.
+    def genGroup(self, groupinfo, groupName):
+        pass
+    # Enumerant generation
+    # <enum> tags may specify their values in several ways, but are usually
+    # just integers.
+    def genEnum(self, enuminfo, name):
+        pass
+    #
+    # Command generation
+    def genCmd(self, cmdinfo, name):
+        special_functions = [
+            'vkGetDeviceProcAddr',
+            'vkGetInstanceProcAddr',
+            'vkCreateDevice',
+            'vkDestroyDevice',
+            'vkCreateInstance',
+            'vkDestroyInstance',
+            'vkEnumerateInstanceLayerProperties',
+            'vkEnumerateInstanceExtensionProperties',
+            'vkAllocateCommandBuffers',
+            'vkFreeCommandBuffers',
+            'vkCreateDebugReportCallbackEXT',
+            'vkDestroyDebugReportCallbackEXT',
+        ]
+        if name in special_functions:
+            self.intercepts += [ '    "%s", (PFN_vkVoidFunction) %s,' % (name,name) ]
+            return
+        if "KHR" in name:
+            self.appendSection('command', '// TODO - not wrapping KHR function ' + name)
+            return
+        # Determine first if this function needs to be intercepted
+        startthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'start')
+        if startthreadsafety is None:
+            return
+        finishthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'finish')
+        # record that the function will be intercepted
+        if (self.featureExtraProtect != None):
+            self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
+        self.intercepts += [ '    "%s", (PFN_vkVoidFunction) %s,' % (name,name) ]
+        if (self.featureExtraProtect != None):
+            self.intercepts += [ '#endif' ]
+
+        OutputGenerator.genCmd(self, cmdinfo, name)
+        #
+        decls = self.makeCDecls(cmdinfo.elem)
+        self.appendSection('command', '')
+        self.appendSection('command', decls[0][:-1])
+        self.appendSection('command', '{')
+        # setup common to call wrappers
+        # first parameter is always dispatchable
+        dispatchable_type = cmdinfo.elem.find('param/type').text
+        dispatchable_name = cmdinfo.elem.find('param/name').text
+        self.appendSection('command', '    dispatch_key key = get_dispatch_key('+dispatchable_name+');')
+        self.appendSection('command', '    layer_data *my_data = get_my_data_ptr(key, layer_data_map);')
+        if dispatchable_type in ["VkPhysicalDevice", "VkInstance"]:
+            self.appendSection('command', '    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;')
+        else:
+            self.appendSection('command', '    VkLayerDispatchTable *pTable = my_data->device_dispatch_table;')
+        # Declare result variable, if any.
+        resulttype = cmdinfo.elem.find('proto/type')
+        if (resulttype != None and resulttype.text == 'void'):
+            resulttype = None
+        if (resulttype != None):
+            self.appendSection('command', '    ' + resulttype.text + ' result;')
+            assignresult = 'result = '
+        else:
+            assignresult = ''
+
+        self.appendSection('command', str(startthreadsafety))
+        params = cmdinfo.elem.findall('param/name')
+        paramstext = ','.join([str(param.text) for param in params])
+        API = cmdinfo.elem.attrib.get('name').replace('vk','pTable->',1)
+        self.appendSection('command', '    ' + assignresult + API + '(' + paramstext + ');')
+        self.appendSection('command', str(finishthreadsafety))
+        # Return result variable, if any.
+        if (resulttype != None):
+            self.appendSection('command', '    return result;')
+        self.appendSection('command', '}')
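
(Illustration, not part of the patch.) Given the genCmd and makeThreadUseBlock logic above, the wrapper emitted into thread_check.h for a command with one externally synchronized handle parameter would look roughly like the sketch below. vkDestroyFence is used here only as an example; the exact prototype text, parameter alignment, and placement of the externsync comments come from makeCDecls and makeThreadUseBlock and may differ in whitespace.

    void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
    {
        dispatch_key key = get_dispatch_key(device);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
        startReadObject(my_data, device);
        startWriteObject(my_data, fence);
    // Host access to fence must be externally synchronized
        pTable->DestroyFence(device,fence,pAllocator);
        finishReadObject(my_data, device);
        finishWriteObject(my_data, fence);
    // Host access to fence must be externally synchronized
    }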
index 5421d41..986eb06 100755 (executable)
--- a/genvk.py
+++ b/genvk.py
@@ -23,7 +23,7 @@
 
 import sys, time, pdb, string, cProfile
 from reg import *
-from generator import write, CGeneratorOptions, COutputGenerator, DocGeneratorOptions, DocOutputGenerator, PyOutputGenerator, ValidityOutputGenerator, HostSynchronizationOutputGenerator
+from generator import write, CGeneratorOptions, COutputGenerator, DocGeneratorOptions, DocOutputGenerator, PyOutputGenerator, ValidityOutputGenerator, HostSynchronizationOutputGenerator, ThreadGeneratorOptions, ThreadOutputGenerator
 
 # debug - start header generation in debugger
 # dump - dump registry after loading
@@ -166,7 +166,7 @@ buildList = [
     # change to 'defaultExtensions = None' below.
     [ COutputGenerator,
       CGeneratorOptions(
-        filename          = '../vulkan/vulkan.h',
+        filename          = 'include/vulkan/vulkan.h',
         apiname           = 'vulkan',
         profile           = None,
         versions          = allVersions,
@@ -248,6 +248,28 @@ buildList = [
         removeExtensions  = None,
         genDirectory      = '../../doc/specs/vulkan')
     ],
+    # Vulkan 1.0 draft - thread checking layer
+    [ ThreadOutputGenerator,
+      ThreadGeneratorOptions(
+        filename          = 'thread_check.h',
+        apiname           = 'vulkan',
+        profile           = None,
+        versions          = allVersions,
+        emitversions      = allVersions,
+        defaultExtensions = 'vulkan',
+        addExtensions     = None,
+        removeExtensions  = None,
+        prefixText        = prefixStrings + vkPrefixStrings,
+        genFuncPointers   = True,
+        protectFile       = protectFile,
+        protectFeature    = False,
+        protectProto      = True,
+        protectProtoStr   = 'VK_PROTOTYPES',
+        apicall           = '',
+        apientry          = 'VKAPI_CALL ',
+        apientryp         = 'VKAPI_PTR *',
+        alignFuncParam    = 48)
+    ],
     None
 ]
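
(Orientation, not part of the patch.) With the options registered above (filename 'thread_check.h', protectFile enabled, and prefixText set to the shared copyright strings), beginFile and endFile in ThreadOutputGenerator bracket the generated wrappers roughly as follows. Note that the prefix text is emitted after the extern "C" opener, and the procmap entries shown are only illustrative placeholders.

    #ifndef __thread_check_h_
    #define __thread_check_h_ 1

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* prefixText: copyright banner and Vulkan boilerplate ... */

    /* ... one generated wrapper per intercepted command ... */

    // intercepts
    struct { const char* name; PFN_vkVoidFunction pFunc;} procmap[] = {
        "vkCreateInstance", (PFN_vkVoidFunction) vkCreateInstance,
        "vkDestroyInstance", (PFN_vkVoidFunction) vkDestroyInstance,
        /* ... "vkXxx", (PFN_vkVoidFunction) vkXxx for each intercepted command ... */
    };

    #ifdef __cplusplus
    }
    #endif

    #endif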
 
index 0406a57..391ac29 100644 (file)
--- a/layers/CMakeLists.txt
+++ b/layers/CMakeLists.txt
@@ -14,6 +14,13 @@ macro(run_vk_layer_generate subcmd output)
        )
 endmacro()
 
+macro(run_vk_layer_xml_generate subcmd output)
+       add_custom_command(OUTPUT ${output}
+               COMMAND ${PYTHON_CMD} ${PROJECT_SOURCE_DIR}/genvk.py -registry ${PROJECT_SOURCE_DIR}/vk.xml ${output}
+               DEPENDS ${PROJECT_SOURCE_DIR}/vk.xml ${PROJECT_SOURCE_DIR}/generator.py ${PROJECT_SOURCE_DIR}/genvk.py ${PROJECT_SOURCE_DIR}/reg.py
+       )
+endmacro()
+
 set(LAYER_JSON_FILES
     VkLayer_draw_state
     VkLayer_image
@@ -135,7 +142,7 @@ add_custom_target(generate_vk_layer_helpers DEPENDS
 )
 
 run_vk_layer_generate(object_tracker object_tracker.cpp)
-run_vk_layer_generate(threading threading.cpp)
+run_vk_layer_xml_generate(Threading thread_check.h)
 run_vk_layer_generate(unique_objects unique_objects.cpp)
 
 add_library(layer_utils SHARED vk_layer_config.cpp vk_layer_extension_utils.cpp vk_layer_utils.cpp)
@@ -155,5 +162,5 @@ add_vk_layer(param_checker param_checker.cpp vk_layer_debug_marker_table.cpp vk_
 add_vk_layer(swapchain swapchain.cpp vk_layer_table.cpp)
 # generated
 add_vk_layer(object_tracker object_tracker.cpp vk_layer_table.cpp)
-add_vk_layer(threading threading.cpp vk_layer_table.cpp)
+add_vk_layer(threading threading.cpp thread_check.h vk_layer_table.cpp)
 add_vk_layer(unique_objects unique_objects.cpp vk_layer_table.cpp)
diff --git a/layers/threading.cpp b/layers/threading.cpp
new file mode 100644 (file)
index 0000000..195f9e5
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ * Vulkan
+ *
+ * Copyright (C) 2015 Valve, Inc.
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unordered_map>
+#include <list>
+using namespace std;
+
+#include "vk_layer.h"
+#include "vk_layer_config.h"
+#include "vk_layer_extension_utils.h"
+#include "vk_layer_utils.h"
+#include "vk_enum_validate_helper.h"
+#include "vk_struct_validate_helper.h"
+#include "vk_layer_table.h"
+#include "vk_layer_logging.h"
+#include "threading.h"
+
+#include "vk_loader_platform.h"
+#include "vk_dispatch_table_helper.h"
+#include "vk_struct_string_helper_cpp.h"
+#include "vk_layer_data.h"
+
+#include "thread_check.h"
+
+static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAllocator)
+{
+
+    uint32_t report_flags = 0;
+    uint32_t debug_action = 0;
+    FILE *log_output = NULL;
+    const char *strOpt;
+    VkDebugReportCallbackEXT callback;
+    // initialize Threading options
+    report_flags = getLayerOptionFlags("ThreadingReportFlags", 0);
+    getLayerOptionEnum("ThreadingDebugAction", (uint32_t *) &debug_action);
+
+    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
+    {
+        strOpt = getLayerOption("ThreadingLogFilename");
+        log_output = getLayerLogOutput(strOpt, "Threading");
+        VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
+        memset(&dbgCreateInfo, 0, sizeof(dbgCreateInfo));
+        dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
+        dbgCreateInfo.flags = report_flags;
+        dbgCreateInfo.pfnCallback = log_callback;
+        dbgCreateInfo.pUserData = (void *) log_output;
+        layer_create_msg_callback(my_data->report_data, &dbgCreateInfo, pAllocator, &callback);
+        my_data->logging_callback.push_back(callback);
+    }
+
+    if (debug_action & VK_DBG_LAYER_ACTION_DEBUG_OUTPUT) {
+        VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
+        memset(&dbgCreateInfo, 0, sizeof(dbgCreateInfo));
+        dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
+        dbgCreateInfo.flags = report_flags;
+        dbgCreateInfo.pfnCallback = win32_debug_output_msg;
+        dbgCreateInfo.pUserData = NULL;
+        layer_create_msg_callback(my_data->report_data, &dbgCreateInfo, pAllocator, &callback);
+        my_data->logging_callback.push_back(callback);
+    }
+
+    if (!threadingLockInitialized)
+    {
+        loader_platform_thread_create_mutex(&threadingLock);
+        loader_platform_thread_init_cond(&threadingCond);
+        threadingLockInitialized = 1;
+    }
+}
+
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
+{
+    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
+
+    assert(chain_info->u.pLayerInfo);
+    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
+    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+    if (fpCreateInstance == NULL) {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+
+    // Advance the link info for the next element on the chain
+    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
+
+    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
+    if (result != VK_SUCCESS)
+        return result;
+
+    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
+    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
+    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
+
+    my_data->report_data = debug_report_create_instance(
+                               my_data->instance_dispatch_table,
+                               *pInstance,
+                               pCreateInfo->enabledExtensionCount,
+                               pCreateInfo->ppEnabledExtensionNames);
+    initThreading(my_data, pAllocator);
+    return result;
+}
+
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
+{
+    dispatch_key key = get_dispatch_key(instance);
+    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
+    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
+    pTable->DestroyInstance(instance, pAllocator);
+
+    // Clean up logging callback, if any
+    while (my_data->logging_callback.size() > 0) {
+        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
+        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
+        my_data->logging_callback.pop_back();
+    }
+
+    layer_debug_report_destroy_instance(my_data->report_data);
+    delete my_data->instance_dispatch_table;
+    layer_data_map.erase(key);
+
+    if (layer_data_map.empty()) {
+        // Release mutex when destroying last instance.
+        loader_platform_thread_delete_mutex(&threadingLock);
+        threadingLockInitialized = 0;
+    }
+}
+
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
+{
+    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
+
+    assert(chain_info->u.pLayerInfo);
+    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
+    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
+    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+    if (fpCreateDevice == NULL) {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+
+    // Advance the link info for the next element on the chain
+    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
+
+    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
+    if (result != VK_SUCCESS) {
+        return result;
+    }
+
+    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
+    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
+
+    // Setup device dispatch table
+    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
+    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
+
+    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
+    return result;
+}
+
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
+{
+    dispatch_key key = get_dispatch_key(device);
+    layer_data* dev_data = get_my_data_ptr(key, layer_data_map);
+    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
+    layer_data_map.erase(key);
+}
+
+
+VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,  VkExtensionProperties* pProperties)
+{
+    return util_GetExtensionProperties(0, NULL, pCount, pProperties);
+}
+
+static const VkLayerProperties globalLayerProps[] = {
+    {
+        "Threading",
+        VK_API_VERSION, // specVersion
+        VK_MAKE_VERSION(0, 1, 0), // implVersion
+        "layer: Threading",
+    }
+};
+
+
+VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,  VkLayerProperties* pProperties)
+{
+    return util_GetLayerProperties(ARRAY_SIZE(globalLayerProps), globalLayerProps, pCount, pProperties);
+}
+
+static const VkLayerProperties deviceLayerProps[] = {
+    {
+        "Threading",
+        VK_API_VERSION,
+        VK_MAKE_VERSION(0, 1, 0),
+        "layer: Threading",
+    }
+};
+VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties* pProperties)
+{
+    return util_GetLayerProperties(ARRAY_SIZE(deviceLayerProps), deviceLayerProps, pCount, pProperties);
+}
+
+
+static inline PFN_vkVoidFunction layer_intercept_proc(const char *name)
+{
+    for (int i=0; i<sizeof(procmap)/sizeof(procmap[0]); i++) {
+        if (!strcmp(name, procmap[i].name)) return procmap[i].pFunc;
+    }
+    return NULL;
+}
+
+
+static inline PFN_vkVoidFunction layer_intercept_instance_proc(const char *name)
+{
+    if (!name || name[0] != 'v' || name[1] != 'k')
+        return NULL;
+
+    name += 2;
+    if (!strcmp(name, "CreateInstance"))
+        return (PFN_vkVoidFunction) vkCreateInstance;
+    if (!strcmp(name, "DestroyInstance"))
+        return (PFN_vkVoidFunction) vkDestroyInstance;
+    if (!strcmp(name, "EnumerateInstanceExtensionProperties"))
+        return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+    if (!strcmp(name, "EnumerateInstanceLayerProperties"))
+        return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+    if (!strcmp(name, "EnumerateDeviceLayerProperties"))
+        return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+    if (!strcmp(name, "CreateDevice"))
+        return (PFN_vkVoidFunction) vkCreateDevice;
+    if (!strcmp(name, "GetInstanceProcAddr"))
+        return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+
+    return NULL;
+}
+
+VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)
+{
+    PFN_vkVoidFunction addr;
+    layer_data *dev_data;
+    if (device == VK_NULL_HANDLE) {
+        return NULL;
+    }
+
+    addr = layer_intercept_proc(funcName);
+    if (addr)
+        return addr;
+
+    dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+    VkLayerDispatchTable* pTable = dev_data->device_dispatch_table;
+
+    if (pTable->GetDeviceProcAddr == NULL)
+        return NULL;
+    return pTable->GetDeviceProcAddr(device, funcName);
+}
+
+VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
+{
+    PFN_vkVoidFunction addr;
+    layer_data* my_data;
+
+    if (instance == VK_NULL_HANDLE) {
+        return NULL;
+    }
+
+    addr = layer_intercept_instance_proc(funcName);
+    if (addr) {
+        return addr;
+    }
+
+    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+    addr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
+    if (addr) {
+        return addr;
+    }
+
+    VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+    if (pTable->GetInstanceProcAddr == NULL) {
+        return NULL;
+    }
+    return pTable->GetInstanceProcAddr(instance, funcName);
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
+    VkInstance                                      instance,
+    const VkDebugReportCallbackCreateInfoEXT*       pCreateInfo,
+    const VkAllocationCallbacks*                    pAllocator,
+    VkDebugReportCallbackEXT*                       pMsgCallback)
+{
+    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+    startReadObject(my_data, instance);
+    VkResult result = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
+    if (VK_SUCCESS == result) {
+        result = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
+    }
+    finishReadObject(my_data, instance);
+    return result;
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
+    VkInstance                                  instance,
+    VkDebugReportCallbackEXT                    callback,
+    const VkAllocationCallbacks*                pAllocator)
+{
+    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+    startReadObject(my_data, instance);
+    startWriteObject(my_data, callback);
+    my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, callback, pAllocator);
+    layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
+    finishReadObject(my_data, instance);
+    finishWriteObject(my_data, callback);
+}
+
+VkResult VKAPI_CALL vkAllocateCommandBuffers(
+    VkDevice                                    device,
+    const VkCommandBufferAllocateInfo*          pAllocateInfo,
+    VkCommandBuffer*                            pCommandBuffers)
+{
+    dispatch_key key = get_dispatch_key(device);
+    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
+    VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
+    VkResult result;
+    startReadObject(my_data, device);
+    startWriteObject(my_data, pAllocateInfo->commandPool);
+
+    result = pTable->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
+    finishReadObject(my_data, device);
+    finishWriteObject(my_data, pAllocateInfo->commandPool);
+
+    // Record mapping from command buffer to command pool
+    if (VK_SUCCESS == result) {
+        for (int index=0;index<pAllocateInfo->commandBufferCount;index++) {
+            command_pool_map[pCommandBuffers[index]] = pAllocateInfo->commandPool;
+        }
+    }
+
+    return result;
+}
+
+void VKAPI_CALL vkFreeCommandBuffers(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCommandBuffers)
+{
+    dispatch_key key = get_dispatch_key(device);
+    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
+    VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
+    const bool lockCommandPool = false; // pool is already directly locked
+    startReadObject(my_data, device);
+    startWriteObject(my_data, commandPool);
+    for (int index=0;index<commandBufferCount;index++) {
+        startWriteObject(my_data, pCommandBuffers[index], lockCommandPool);
+    }
+
+    pTable->FreeCommandBuffers(device,commandPool,commandBufferCount,pCommandBuffers);
+    finishReadObject(my_data, device);
+    finishWriteObject(my_data, commandPool);
+    for (int index=0;index<commandBufferCount;index++) {
+        finishWriteObject(my_data, pCommandBuffers[index], lockCommandPool);
+        command_pool_map.erase(pCommandBuffers[index]);
+    }
+}
+
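
(Illustration, not part of the patch.) The hazard this layer reports can be reproduced by using a single externally synchronized object from two threads at once. In the hypothetical snippet below, both threads enter the generated vkBeginCommandBuffer wrapper, which calls startWriteObject on the same VkCommandBuffer (and, through command_pool_map, on its parent pool); the second caller collides with the first and counter::startWrite logs THREADING_CHECKER_MULTIPLE_THREADS.

    #include <vulkan/vulkan.h>
    #include <thread>

    // Hypothetical application code: commandBuffer was allocated from a pool
    // owned by the same device, and neither thread synchronizes access to it.
    void record_from_two_threads(VkCommandBuffer commandBuffer)
    {
        VkCommandBufferBeginInfo beginInfo = {};
        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        auto record = [&]() {
            vkBeginCommandBuffer(commandBuffer, &beginInfo);   // wrapper: startWriteObject(my_data, commandBuffer)
            vkEndCommandBuffer(commandBuffer);                 // wrapper: finishWriteObject(my_data, commandBuffer)
        };
        std::thread t1(record), t2(record);
        t1.join();
        t2.join();
    }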
index 32d2a9a..086459f 100644 (file)
--- a/layers/threading.h
+++ b/layers/threading.h
@@ -31,6 +31,7 @@
 
 #ifndef THREADING_H
 #define THREADING_H
+#include <vector>
 #include "vk_layer_config.h"
 #include "vk_layer_logging.h"
 
@@ -42,24 +43,286 @@ typedef enum _THREADING_CHECKER_ERROR
     THREADING_CHECKER_SINGLE_THREAD_REUSE,              // Object used simultaneously by recursion in single thread
 } THREADING_CHECKER_ERROR;
 
+struct object_use_data {
+    loader_platform_thread_id thread;
+    int reader_count;
+    int writer_count;
+};
+
+struct layer_data;
+using namespace std;
+
+static int threadingLockInitialized = 0;
+static loader_platform_thread_mutex threadingLock;
+static loader_platform_thread_cond threadingCond;
+
+template <typename T> class counter {
+    public:
+    const char *typeName;
+    VkDebugReportObjectTypeEXT objectType;
+    unordered_map<T, object_use_data> uses;
+    void startWrite(debug_report_data *report_data, T object)
+    {
+        VkBool32 skipCall = VK_FALSE;
+        loader_platform_thread_id tid = loader_platform_get_thread_id();
+        loader_platform_thread_lock_mutex(&threadingLock);
+        if (uses.find(object) == uses.end()) {
+            // There is no current use of the object.  Record writer thread.
+            struct object_use_data *use_data = &uses[object];
+            use_data->reader_count = 0;
+            use_data->writer_count = 1;
+            use_data->thread = tid;
+        } else {
+            struct object_use_data *use_data = &uses[object];
+            if (use_data->reader_count == 0) {
+                // There are no readers.  Two writers just collided.
+                if (use_data->thread != tid) {
+                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
+                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
+                        typeName, use_data->thread, tid);
+                    if (skipCall) {
+                        // Wait for thread-safe access to object instead of skipping call.
+                        while (uses.find(object) != uses.end()) {
+                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
+                        }
+                        // There is now no current use of the object.  Record writer thread.
+                        struct object_use_data *use_data = &uses[object];
+                        use_data->thread = tid;
+                        use_data->reader_count = 0;
+                        use_data->writer_count = 1;
+                    } else {
+                        // Continue with an unsafe use of the object.
+                        use_data->writer_count += 1;
+                    }
+                } else {
+                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
+                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
+                        typeName, tid);
+                    // There is no way to make recursion safe.  Just forge ahead.
+                    use_data->writer_count += 1;
+                }
+            } else {
+                // There are readers.  This writer collided with them.
+                if (use_data->thread != tid) {
+                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
+                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
+                        typeName, use_data->thread, tid);
+                    if (skipCall) {
+                        // Wait for thread-safe access to object instead of skipping call.
+                        while (uses.find(object) != uses.end()) {
+                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
+                        }
+                        // There is now no current use of the object.  Record writer thread.
+                        struct object_use_data *use_data = &uses[object];
+                        use_data->thread = tid;
+                        use_data->reader_count = 0;
+                        use_data->writer_count = 1;
+                    } else {
+                        // Continue with an unsafe use of the object.
+                        use_data->writer_count += 1;
+                    }
+                } else {
+                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
+                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
+                        typeName, tid);
+                    // There is no way to make recursion safe.  Just forge ahead.
+                    use_data->writer_count += 1;
+                }
+            }
+        }
+        loader_platform_thread_unlock_mutex(&threadingLock);
+    }
+
+    void finishWrite(T object)
+    {
+        // Object is no longer in use
+        loader_platform_thread_lock_mutex(&threadingLock);
+        uses[object].writer_count -= 1;
+        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
+            uses.erase(object);
+        }
+        // Notify any waiting threads that this object may be safe to use
+        loader_platform_thread_cond_broadcast(&threadingCond);
+        loader_platform_thread_unlock_mutex(&threadingLock);
+    }
+
+    void startRead(debug_report_data *report_data, T object) {
+        VkBool32 skipCall = VK_FALSE;
+        loader_platform_thread_id tid = loader_platform_get_thread_id();
+        loader_platform_thread_lock_mutex(&threadingLock);
+        if (uses.find(object) == uses.end()) {
+            // There is no current use of the object.  Record reader count
+            struct object_use_data *use_data = &uses[object];
+            use_data->reader_count = 1;
+            use_data->writer_count = 0;
+            use_data->thread = tid;
+        } else if (uses[object].writer_count > 0) {
+            // There is a writer of the object.
+            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
+                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
+                typeName, uses[object].thread, tid);
+            if (skipCall) {
+                // Wait for thread-safe access to object instead of skipping call.
+                while (uses.find(object) != uses.end()) {
+                    loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
+                }
+                // There is no current use of the object.  Record reader count
+                struct object_use_data *use_data = &uses[object];
+                use_data->reader_count = 1;
+                use_data->writer_count = 0;
+                use_data->thread = tid;
+            } else {
+                uses[object].reader_count += 1;
+            }
+        } else {
+            // There are other readers of the object.  Increase reader count
+            uses[object].reader_count += 1;
+        }
+        loader_platform_thread_unlock_mutex(&threadingLock);
+    }
+    void finishRead(T object) {
+        loader_platform_thread_lock_mutex(&threadingLock);
+        uses[object].reader_count -= 1;
+        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
+            uses.erase(object);
+        }
+        // Notify any waiting threads that this object may be safe to use
+        loader_platform_thread_cond_broadcast(&threadingCond);
+        loader_platform_thread_unlock_mutex(&threadingLock);
+    }
+    counter(const char *name = "",
+            VkDebugReportObjectTypeEXT type=VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
+        typeName = name;
+        objectType=type;
+    }
+};
+
 struct layer_data {
     debug_report_data *report_data;
-    VkDebugReportCallbackEXT   logging_callback;
-
-    layer_data() :
+    std::vector<VkDebugReportCallbackEXT> logging_callback;
+    VkLayerDispatchTable* device_dispatch_table;
+    VkLayerInstanceDispatchTable* instance_dispatch_table;
+    counter<VkCommandBuffer> c_VkCommandBuffer;
+    counter<VkDevice> c_VkDevice;
+    counter<VkInstance> c_VkInstance;
+    counter<VkQueue> c_VkQueue;
+    counter<VkBuffer> c_VkBuffer;
+    counter<VkBufferView> c_VkBufferView;
+    counter<VkCommandPool> c_VkCommandPool;
+    counter<VkDescriptorPool> c_VkDescriptorPool;
+    counter<VkDescriptorSet> c_VkDescriptorSet;
+    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
+    counter<VkDeviceMemory> c_VkDeviceMemory;
+    counter<VkEvent> c_VkEvent;
+    counter<VkFence> c_VkFence;
+    counter<VkFramebuffer> c_VkFramebuffer;
+    counter<VkImage> c_VkImage;
+    counter<VkImageView> c_VkImageView;
+    counter<VkPipeline> c_VkPipeline;
+    counter<VkPipelineCache> c_VkPipelineCache;
+    counter<VkPipelineLayout> c_VkPipelineLayout;
+    counter<VkQueryPool> c_VkQueryPool;
+    counter<VkRenderPass> c_VkRenderPass;
+    counter<VkSampler> c_VkSampler;
+    counter<VkSemaphore> c_VkSemaphore;
+    counter<VkShaderModule> c_VkShaderModule;
+    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
+    layer_data():
         report_data(nullptr),
-        logging_callback(VK_NULL_HANDLE)
+        c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
+        c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
+        c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
+        c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
+        c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
+        c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
+        c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
+        c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
+        c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
+        c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
+        c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
+        c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
+        c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
+        c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
+        c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
+        c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
+        c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
+        c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
+        c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
+        c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
+        c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
+        c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
+        c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
+        c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
+        c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
     {};
 };
 
+#define WRAPPER(type) \
+static void startWriteObject(struct layer_data *my_data, type object){my_data->c_##type.startWrite(my_data->report_data, object);}\
+static void finishWriteObject(struct layer_data *my_data, type object){my_data->c_##type.finishWrite(object);}\
+static void startReadObject(struct layer_data *my_data, type object){my_data->c_##type.startRead(my_data->report_data, object);}\
+static void finishReadObject(struct layer_data *my_data, type object){my_data->c_##type.finishRead(object);}
+
+WRAPPER(VkDevice)
+WRAPPER(VkInstance)
+WRAPPER(VkQueue)
+WRAPPER(VkBuffer)
+WRAPPER(VkBufferView)
+WRAPPER(VkCommandPool)
+WRAPPER(VkDescriptorPool)
+WRAPPER(VkDescriptorSet)
+WRAPPER(VkDescriptorSetLayout)
+WRAPPER(VkDeviceMemory)
+WRAPPER(VkEvent)
+WRAPPER(VkFence)
+WRAPPER(VkFramebuffer)
+WRAPPER(VkImage)
+WRAPPER(VkImageView)
+WRAPPER(VkPipeline)
+WRAPPER(VkPipelineCache)
+WRAPPER(VkPipelineLayout)
+WRAPPER(VkQueryPool)
+WRAPPER(VkRenderPass)
+WRAPPER(VkSampler)
+WRAPPER(VkSemaphore)
+WRAPPER(VkShaderModule)
+WRAPPER(VkDebugReportCallbackEXT)
+
 static std::unordered_map<void*, layer_data *> layer_data_map;
-static device_table_map                        threading_device_table_map;
-static instance_table_map                      threading_instance_table_map;
+static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
 
-static inline debug_report_data *mdd(const void* object)
+// VkCommandBuffer wrappers also need to check for implicit use of the parent command pool
+static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=true)
+{
+    if (lockPool) {
+        my_data->c_VkCommandPool.startWrite(my_data->report_data, command_pool_map[object]);
+    }
+    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
+}
+static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=true)
+{
+    my_data->c_VkCommandBuffer.finishWrite(object);
+    if (lockPool) {
+        my_data->c_VkCommandPool.finishWrite(command_pool_map[object]);
+    }
+}
+static void startReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=false)
+{
+    if (lockPool) {
+        my_data->c_VkCommandPool.startRead(my_data->report_data, command_pool_map[object]);
+    }
+    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
+}
+static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=false)
 {
-    dispatch_key key = get_dispatch_key(object);
-    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
-    return my_data->report_data;
+    my_data->c_VkCommandBuffer.finishRead(object);
+    if (lockPool) {
+        my_data->c_VkCommandPool.finishRead(command_pool_map[object]);
+    }
 }
 #endif // THREADING_H
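
(Reference, not part of the patch.) WRAPPER(VkQueue) above expands to the four helpers sketched below (reformatted onto multiple lines for readability). The VkCommandBuffer overloads at the end of the header are written by hand instead of via WRAPPER so that touching a command buffer can also lock its parent pool through command_pool_map, with the lockPool argument letting callers such as vkFreeCommandBuffers skip that step when the pool is already locked directly.

    static void startWriteObject(struct layer_data *my_data, VkQueue object)
        { my_data->c_VkQueue.startWrite(my_data->report_data, object); }
    static void finishWriteObject(struct layer_data *my_data, VkQueue object)
        { my_data->c_VkQueue.finishWrite(object); }
    static void startReadObject(struct layer_data *my_data, VkQueue object)
        { my_data->c_VkQueue.startRead(my_data->report_data, object); }
    static void finishReadObject(struct layer_data *my_data, VkQueue object)
        { my_data->c_VkQueue.finishRead(object); }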