2 # Copyright (c) 2015-2016 The Khronos Group Inc.
3 # Copyright (c) 2015-2016 Valve Corporation
4 # Copyright (c) 2015-2016 LunarG, Inc.
5 # Copyright (c) 2015-2016 Google Inc.
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
11 # http://www.apache.org/licenses/LICENSE-2.0
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
19 # Author: Tobin Ehlis <tobin@lunarg.com>
27 # vk_layer_documentation_generate.py overview
28 # This script is intended to generate documentation based on vulkan layers
29 # It parses known validation layer headers for details of the validation checks
30 # It parses validation layer source files for specific code where checks are implemented
31 # It can also print its internal data structs in a human-readable txt format,
32 # as well as utility output such as enum values printed as strings
34 # NOTE : Initially the script is performing validation of a hand-written document
35 # Right now it does 3 checks:
36 # 1. Verify ENUM codes declared in source are documented
37 # 2. Verify ENUM codes in document are declared in source
38 # 3. Verify API function names in document are in the actual API header (vulkan.py)
39 # Currently script will flag errors in all of these cases
41 # TODO : Need a formal specification of the syntax for doc generation
42 # Initially, these are the basics:
43 # 1. Validation checks have unique ENUM values defined in validation layer header
44 # 2. ENUM includes comments for 1-line overview of check and more detailed description
45 # 3. Actual code implementing checks includes ENUM value in callback
46 # 4. Code to test checks should include reference to ENUM
49 # TODO : Need list of known validation layers to use as default input
50 # Just a couple of flat lists right now, but may need to make this input file
51 # or at least a more dynamic data structure
# Table driving doc generation: maps each validation layer name to the header
# declaring its error enum, the source file implementing its checks, and the
# name of that error enum type.
# NOTE(review): the extraction dropped lines inside this literal -- every entry
# is missing one line between 'source' and 'error_enum' (presumably a 'tests'
# path), and the dict's closing brace line is absent. Confirm against the
# upstream file before editing.
# NOTE(review): 'threading' points at 'dbuild/layers/threading.cpp' while all
# other entries use the 'layers/' tree -- looks like a stale build-dir path;
# verify.
52 layer_inputs = { 'draw_state' : {'header' : 'layers/core_validation_error_enums.h',
53 'source' : 'layers/core_validation.cpp',
55 'error_enum' : 'DRAW_STATE_ERROR'},
56 'shader_checker' : {'header' : 'layers/core_validation_error_enums.h',
57 'source' : 'layers/core_validation.cpp',
59 'error_enum' : 'SHADER_CHECKER_ERROR'},
60 'mem_tracker' : {'header' : 'layers/core_validation_error_enums.h',
61 'source' : 'layers/core_validation.cpp',
63 'error_enum' : 'MEM_TRACK_ERROR'},
64 'device_limits' : {'header' : 'layers/core_validation_error_enums.h',
65 'source' : 'layers/core_validation.cpp',
67 'error_enum' : 'DEV_LIMITS_ERROR',},
68 'object_tracker' : {'header' : 'layers/object_tracker.h',
69 'source' : 'layers/object_tracker.cpp',
71 'error_enum' : 'OBJECT_TRACK_ERROR',},
72 'threading' : {'header' : 'layers/threading.h',
73 'source' : 'dbuild/layers/threading.cpp',
75 'error_enum' : 'THREADING_CHECKER_ERROR'},
76 'image' : {'header' : 'layers/image.h',
77 'source' : 'layers/image.cpp',
79 'error_enum' : 'IMAGE_ERROR',},
80 'swapchain' : {'header' : 'layers/swapchain.h',
81 'source' : 'layers/swapchain.cpp',
83 'error_enum' : 'SWAPCHAIN_ERROR',},
84 'parameter_validation' : {'header' : 'layers/parameter_validation_utils.h',
85 'source' : 'layers/parameter_validation.cpp',
87 'error_enum' : 'ErrorCode',},
# Default input lists for the CLI options below, derived from the table above.
90 builtin_headers = [layer_inputs[ln]['header'] for ln in layer_inputs]
91 builtin_source = [layer_inputs[ln]['source'] for ln in layer_inputs]
92 builtin_tests = ['tests/layer_validation_tests.cpp', ]
94 # List of extensions in layers that are included in documentation, but not in vulkan.py API set
95 layer_extension_functions = ['objTrackGetObjects', 'objTrackGetObjectsOfType']
# Build and evaluate the command-line interface.
# NOTE(review): the enclosing 'def' line (the argument-handling function this
# 'return' belongs to) was dropped by the extraction -- restore it from
# upstream before running.
# NOTE(review): 'documenation' below is a typo in a user-visible runtime
# string; left untouched here since this edit changes comments only.
98 parser = argparse.ArgumentParser(description='Generate layer documenation from source.')
99 parser.add_argument('--in_headers', required=False, default=builtin_headers, help='The input layer header files from which code will be generated.')
100 parser.add_argument('--in_source', required=False, default=builtin_source, help='The input layer source files from which code will be generated.')
101 parser.add_argument('--test_source', required=False, default=builtin_tests, help='The input test source files from which code will be generated.')
102 parser.add_argument('--layer_doc', required=False, default='layers/vk_validation_layer_details.md', help='Existing layer document to be validated against actual layers.')
103 parser.add_argument('--validate', action='store_true', default=False, help='Validate that there are no mismatches between layer documentation and source. This includes cross-checking the validation checks, and making sure documented Vulkan API calls exist.')
104 parser.add_argument('--print_structs', action='store_true', default=False, help='Primarily a debug option that prints out internal data structs used to generate layer docs.')
105 parser.add_argument('--print_doc_checks', action='store_true', default=False, help='Primarily a debug option that prints out all of the checks that are documented.')
106 return parser.parse_args()
108 # Little helper class for coloring cmd line output
# NOTE(review): the 'class bcolors:' and '__init__' header lines were dropped
# by the extraction, as was the body of the platform check below and the
# green()/red()/yellow()/endc() accessor methods that callers elsewhere in
# this file use (e.g. self.txt_color.red()). Presumably the missing 'if' body
# blanks the escape codes on non-Linux platforms -- confirm against upstream.
112 self.GREEN = '\033[0;32m'
113 self.RED = '\033[0;31m'
114 self.YELLOW = '\033[1;33m'
115 self.ENDC = '\033[0m'
116 if 'Linux' != platform.system():
134 # Class to parse the validation layer test source and store testnames
def __init__(self, test_file_list, test_group_name=('VkLayerTest', 'VkWsiEnabledLayerTest')):
    """Record test files to scan and build the TEST_F(...) trigger strings.

    test_file_list -- paths of the layer-validation test sources to parse
    test_group_name -- googletest fixture names; a line containing
        'TEST_F(<fixture>' marks the start of a test definition.
        The default is a tuple (not a list) so the default object cannot be
        mutated and shared across instances.
    """
    self.test_files = test_file_list
    # Test names found by parse() accumulate here.
    self.tests_set = set()
    # Substrings whose presence in a source line flags a test definition.
    self.test_trigger_txt_list = ['TEST_F(%s' % tg for tg in test_group_name]
144 # Parse test files into internal data struct
# NOTE(review): the extraction dropped lines in this method -- the enclosing
# 'def' line, the 'for line in tf:' loop header after the open() below, a
# comment-skip 'continue', the condition guarding 'grab_next_line = True'
# (presumably "testname empty -> name is on the next line"), and the 'else'
# before the tests_set.add call. Reproduced as-is; restore from upstream.
146 # For each test file, parse test names into set
147 grab_next_line = False # handle testname on separate line than wildcard
148 for test_file in self.test_files:
149 with open(test_file) as tf:
# Skip C/C++ comment lines so commented-out tests are not recorded.
151 if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
# A trigger substring means this line declares a TEST_F; the test name is
# the last comma-separated token, stripped of braces/parens.
154 if True in [ttt in line for ttt in self.test_trigger_txt_list]:
155 #print('Test wildcard in line: %s' % (line))
156 testname = line.split(',')[-1]
157 testname = testname.strip().strip(' {)')
158 #print('Inserting test: "%s"' % (testname))
160 grab_next_line = True
162 self.tests_set.add(testname)
163 if grab_next_line: # test name on its own line
164 grab_next_line = False
165 testname = testname.strip().strip(' {)')
166 self.tests_set.add(testname)
168 # Class to parse the layer source code and store details in internal data structs
def __init__(self, header_file_list, source_file_list):
    """Record the layer header/source files to parse and prime the results.

    header_file_list -- layer headers declaring the error enums
    source_file_list -- layer sources implementing the checks
    """
    self.header_files = header_file_list
    self.source_files = source_file_list
    # Parse results land here, keyed by layer name; parse() and
    # print_structs() index into it, but the extracted source never
    # initialized it -- restore the missing init so those methods work.
    self.layer_dict = {}
176 # Parse layer header files into internal dict data structs
# NOTE(review): the extraction dropped lines in this method -- the enclosing
# 'def parse(self):' line, the 'with open(hf)'/'for line in f:' headers, the
# comment-skip 'continue', the flag controlling whether we are inside the enum
# block, and the statements that start/stop enum capture. Reproduced as-is;
# restore from upstream before running.
178 # For each header file, parse details into dicts
179 # TODO : Should have a global dict element to track overall list of checks
181 for layer_name in layer_inputs:
182 hf = layer_inputs[layer_name]['header']
183 self.layer_dict[layer_name] = {} # initialize a new dict for this layer
184 self.layer_dict[layer_name]['CHECKS'] = [] # enum of checks is stored in a list
185 #print('Parsing header file %s as layer name %s' % (hf, layer_name))
188 if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
189 #print("Skipping comment line: %s" % line)
190 # For now skipping lines starting w/ comment, may use these to capture
191 # documentation in the future
195 if '}' in line: # we're done with enum definition
198 # grab the enum name as a unique check
200 # TODO : When documentation for a check is contained in the source,
201 # this is where we should also capture that documentation so that
202 # it can then be transformed into desired doc format
203 enum_name = line.split(',')[0].strip()
204 # Flag an error if we have already seen this enum
205 if enum_name in self.layer_dict[layer_name]['CHECKS']:
# NOTE(review): format-string bug below -- '% layer' has no valid conversion
# type, so this print raises ValueError ("unsupported format character")
# whenever a duplicate enum is actually found; should be '%s layer'.
206 print('ERROR : % layer has duplicate error enum: %s' % (layer_name, enum_name))
207 self.layer_dict[layer_name]['CHECKS'].append(enum_name)
208 # If the line includes 'enum' and the expected enum name, start capturing enums
209 if False not in [ex in line for ex in ['enum', layer_inputs[layer_name]['error_enum']]]:
212 # For each source file, parse into dicts
213 for sf in self.source_files:
214 #print('Parsing source file %s' % sf)
216 # TODO : In the source file we want to see where checks actually occur
217 # Need to build function tree of checks so that we know all of the
218 # checks that occur under a top-level Vulkan API call
219 # Eventually in the validation we can flag ENUMs that aren't being
220 # used in the source, and we can document source code lines as well
221 # as Vulkan API calls where each specific ENUM check is made
def print_structs(self):
    """Dump the parsed per-layer check lists to stdout (debug aid)."""
    print('This is where I print the data structs')
    # One summary line per layer, followed by its checks one per line.
    # The reported count is len-1, matching the original output exactly.
    for layer_name, layer_data in self.layer_dict.items():
        checks = layer_data['CHECKS']
        print('Layer %s has %i checks:\n%s' % (layer_name, len(checks) - 1, "\n\t".join(checks)))
228 # Class to parse hand-written md layer documentation into a dict and then validate its contents
def __init__(self, source_file):
    """Record the markdown doc to parse and set up empty result containers.

    source_file -- path to the hand-written layer-details markdown document
    """
    self.layer_doc_filename = source_file
    # Colored terminal output helper used by validate().
    self.txt_color = bcolors()
    # Main data struct to store info from layer doc
    self.layer_doc_dict = {}
    # Comprehensive list of all validation checks recorded in doc.
    # parse_layer_doc() appends to this and validate()/print_checks()
    # iterate it, but the extracted source never initialized it --
    # restore the missing init.
    self.enum_list = []
238 # Parse the contents of doc into data struct
# NOTE(review): the extraction dropped lines in this method -- the enclosing
# 'def' line, the 'for line in f:' loop header after the open() below, a
# duplicate-check guard before enum_list.append, and the 'if parse_overview:'
# condition guarding the overview accumulation. Reproduced as-is; restore
# from upstream before running.
# State machine: exactly one of the three parse_* flags is active at a time,
# toggled by the trigger substrings when section headers are encountered.
241 parse_layer_details = False
242 detail_trigger = '| Check | '
243 parse_pending_work = False
244 pending_trigger = ' Pending Work'
245 parse_overview = False
246 overview_trigger = ' Overview'
249 with open(self.layer_doc_filename) as f:
# Pending-work sections are numbered lists; capture the text after 'N.'
251 if parse_pending_work:
252 if '.' in line and line.strip()[0].isdigit():
253 todo_item = line.split('.')[1].strip()
254 self.layer_doc_dict[layer_name]['pending'].append(todo_item)
255 if pending_trigger in line and '##' in line:
256 parse_layer_details = False
257 parse_pending_work = True
258 parse_overview = False
259 self.layer_doc_dict[layer_name]['pending'] = []
260 if parse_layer_details:
261 # Grab details but skip the format line with a bunch of '-' chars
262 if '|' in line and line.count('-') < 20:
263 detail_sections = line.split('|')
264 #print("Details elements from line %s: %s" % (line, detail_sections))
# Column 3 holds the enum suffix; prepend the layer's enum prefix taken
# from the table header to form the full check name.
265 check_name = '%s%s' % (enum_prefix, detail_sections[3].strip())
267 self.enum_list.append(check_name)
268 self.layer_doc_dict[layer_name][check_name] = {}
269 self.layer_doc_dict[layer_name][check_name]['summary_txt'] = detail_sections[1].strip()
270 self.layer_doc_dict[layer_name][check_name]['details_txt'] = detail_sections[2].strip()
271 self.layer_doc_dict[layer_name][check_name]['api_list'] = detail_sections[4].split()
272 self.layer_doc_dict[layer_name][check_name]['tests'] = detail_sections[5].split()
273 self.layer_doc_dict[layer_name][check_name]['notes'] = detail_sections[6].strip()
274 # strip any unwanted commas from api and test names
275 self.layer_doc_dict[layer_name][check_name]['api_list'] = [a.strip(',') for a in self.layer_doc_dict[layer_name][check_name]['api_list']]
276 test_list = [a.strip(',') for a in self.layer_doc_dict[layer_name][check_name]['tests']]
# Drop any 'Fixture.' qualifier so only the bare test name is stored.
277 self.layer_doc_dict[layer_name][check_name]['tests'] = [a.split('.')[-1] for a in test_list]
278 # Trigger details parsing when we have table header
279 if detail_trigger in line:
280 parse_layer_details = True
281 parse_pending_work = False
282 parse_overview = False
283 enum_txt = line.split('|')[3]
285 enum_prefix = enum_txt.split()[-1].strip('*').strip()
286 #print('prefix: %s' % enum_prefix)
288 self.layer_doc_dict[layer_name]['overview'] += line
289 if overview_trigger in line and '##' in line:
290 parse_layer_details = False
291 parse_pending_work = False
292 parse_overview = True
293 layer_name = line.split()[1]
294 self.layer_doc_dict[layer_name] = {}
295 self.layer_doc_dict[layer_name]['overview'] = ''
297 # Verify that checks, tests and api references in layer doc match reality
298 # Report API calls from doc that are not found in API
299 # Report checks from doc that are not in actual layers
300 # Report checks from layers that are not captured in doc
301 # Report checks from doc that do not have a valid test
# NOTE(review): the extraction dropped lines in this method -- the
# initializations and increments of errors_found/warnings_found, a 'break'
# after the inner match, 'continue' lines, and the wildcard-part matching
# logic between the test_parts loop and the first error print. Reproduced
# as-is; restore from upstream before running.
302 def validate(self, layer_dict, tests_set):
303 #print("tests_set: %s" % (tests_set))
304 # Count number of errors found and return it
307 # A few checks that are allowed to not have tests
308 no_test_checks = ['DRAWSTATE_INTERNAL_ERROR', 'DRAWSTATE_OUT_OF_MEMORY', 'MEMTRACK_INTERNAL_ERROR', 'OBJTRACK_INTERNAL_ERROR']
309 # First we'll go through the doc datastructures and flag any issues
310 for chk in self.enum_list:
311 doc_layer_found = False
312 for real_layer in layer_dict:
313 if chk in layer_dict[real_layer]['CHECKS']:
314 #print('Found actual layer check %s in doc' % (chk))
315 doc_layer_found = True
317 if not doc_layer_found:
318 print(self.txt_color.red() + 'Actual layers do not contain documented check: %s' % (chk) + self.txt_color.endc())
321 # Now go through API names in doc and verify they're real
322 # First we're going to transform proto names from vulkan.py into single list
323 core_api_names = [p.name for p in vulkan.core.protos]
324 wsi_s_names = [p.name for p in vulkan.ext_khr_surface.protos]
325 wsi_ds_names = [p.name for p in vulkan.ext_khr_device_swapchain.protos]
326 dbg_rpt_names = [p.name for p in vulkan.ext_debug_report.protos]
327 api_names = core_api_names + wsi_s_names + wsi_ds_names + dbg_rpt_names
328 for ln in self.layer_doc_dict:
329 for chk in self.layer_doc_dict[ln]:
# 'overview' and 'pending' are section entries, not checks -- skipped
# (the 'continue' line was dropped by the extraction).
330 if chk in ['overview', 'pending']:
# api[2:] strips the doc's 'vk' prefix before the lookup -- TODO confirm
# the doc lists APIs with a 2-char prefix.
332 for api in self.layer_doc_dict[ln][chk]['api_list']:
333 if api[2:] not in api_names and api not in layer_extension_functions:
334 print(self.txt_color.red() + 'Doc references invalid function: %s' % (api) + self.txt_color.endc())
336 # For now warn on missing or invalid tests
337 for test in self.layer_doc_dict[ln][chk]['tests']:
339 # naive way to handle wildcards, just make sure we have matches on parts
340 test_parts = test.split('*')
341 for part in test_parts:
# NOTE(review): unlike the other error prints, the two below do not append
# txt_color.endc(), so the red color leaks into subsequent output.
348 print(self.txt_color.red() + 'Validation check %s has missing or invalid test : %s' % (chk, test))
351 elif test not in tests_set and not chk.endswith('_NONE'):
353 if chk not in no_test_checks:
356 print(self.txt_color.red() + 'Validation check %s has missing or invalid test : %s' % (chk, test))
358 # Now go through all of the actual checks in the layers and make sure they're covered in the doc
359 for ln in layer_dict:
360 for chk in layer_dict[ln]['CHECKS']:
361 if chk not in self.enum_list:
362 print(self.txt_color.red() + 'Doc is missing check: %s' % (chk) + self.txt_color.endc())
# Returns (error_count, warning_count); their assignments were lost in the
# extraction (see note at top of method).
365 return (errors_found, warnings_found)
367 # Print all of the checks captured in the doc
def print_checks(self):
    """List every check name recorded while parsing the layer doc."""
    formatted = '\n\t'.join(self.enum_list)
    print('Checks captured in doc:\n%s' % (formatted))
# Script driver: parse layers and tests, then emit the requested reports.
# NOTE(review): the extraction dropped lines here -- the enclosing
# 'def main():' line, the 'opts = <arg handler>()' assignment that the
# opts references below depend on, the layer_parser.parse()/test_parser.parse()
# calls, the 'if opts.validate:' guard before the validate() call, the 'else'
# branches of the two result prints, and the body of the __main__ guard
# (presumably 'main()'). Reproduced as-is; restore from upstream.
374 # Create parser for layer files
375 layer_parser = LayerParser(opts.in_headers, opts.in_source)
376 # Parse files into internal data structs
379 test_parser = TestParser(opts.test_source)
382 # Generate requested types of output
383 if opts.print_structs: # Print details of internal data structs
384 layer_parser.print_structs()
386 layer_doc = LayerDoc(opts.layer_doc)
388 if opts.print_doc_checks:
389 layer_doc.print_checks()
392 (num_errors, num_warnings) = layer_doc.validate(layer_parser.layer_dict, test_parser.tests_set)
393 txt_color = bcolors()
394 if (0 == num_warnings):
395 print(txt_color.green() + 'No warning cases found between %s and implementation' % (os.path.basename(opts.layer_doc)) + txt_color.endc())
# NOTE(review): the yellow warning print below never appends txt_color.endc(),
# so the color leaks into subsequent terminal output.
397 print(txt_color.yellow() + 'Found %s warnings due to missing tests. Missing tests are labeled as "TODO" in "%s."' % (num_warnings, opts.layer_doc))
398 if (0 == num_errors):
399 print(txt_color.green() + 'No mismatches found between %s and implementation' % (os.path.basename(opts.layer_doc)) + txt_color.endc())
404 if __name__ == "__main__":