#!/usr/bin/env python
# coding: utf-8

# ### Update Slicer CLI buildsystem to download input and baseline using ExternalData
#
# This notebook was specifically designed to process a Slicer source tree (based off r28333)
# and perform the following updates:
# * Copy data from the `Slicer/Testing/Data/Input` and `Slicer/MRML/Core/Testing/TestData` directories
#   into each CLI `Slicer/Modules/CLI/<ModuleName>/Data/(Baseline|Input)` directory where they are used.
# * Update the `Slicer/Modules/CLI/<ModuleName>/Testing/CMakeLists.txt` of each CLI to use `ExternalData_add_test`.
# * Upload data to Midas (see http://slicer.kitware.com/midas3/folder/301) and check that the data can
#   effectively be downloaded given a checksum.
# * Identify the list of files that could potentially be removed from the source tree.
# * Identify the list of CLI `Testing/CMakeLists.txt` missing the setting of the `FOLDER` target property.

# In[1]:

import hashlib
import glob
import os
import re
import shutil

get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '1')
get_ipython().run_line_magic('aimport', 'pydas')

#---------------------------------------------------------------------------
slicer_src = '/home/jcfr/Projects/Slicer-Qt5-VTK8'

cli_src_directory = os.path.join(slicer_src, 'Modules/CLI')

# The goal is to remove this global directory and have every module and
# library list its own requirements regarding input test and baseline data.
global_test_data_dir = '/home/jcfr/Projects/Slicer-Qt5-VTK8/Testing/Data/Input'
global_mrml_test_data_dir = '/home/jcfr/Projects/Slicer-Qt5-VTK8/Libs/MRML/Core/Testing/TestData'

global_datafiles_candidate_for_removal = []

# Folder id associated with the Slicer data hosted on http://slicer.kitware.com/midas3
midas_data_folder_id = 301
midas_url = 'http://slicer.kitware.com/midas3'
email = os.environ["MIDAS_PACKAGE_EMAIL"]
apikey = os.environ["MIDAS_PACKAGE_API_KEY"]

# Update sys.path adding the Slicer python module directory
slicer_python_dir = os.path.join(slicer_src, 'Base/Python/slicer/release')
import sys
if slicer_python_dir not in sys.path:
    sys.path.append(slicer_python_dir)
import midasdata

# If enabled, update Midas, creating missing folders and uploading data if needed.
midas_update = False
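
#---------------------------------------------------------------------------
# Background sketch (not part of the conversion): CMake's ExternalData module
# resolves references written as DATA{${INPUT}/foo.nrrd} by looking for a
# "content link" in the source tree (e.g. a one-line foo.nrrd.md5 file
# holding only the checksum) and downloading the real file from the
# configured URL templates. The conversion driven by the flags below and by
# the In[3] cell produces exactly those content links. The file name used in
# this sketch is hypothetical.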

# If enabled, connect to the Midas server and check that the current CLI
# test data have been uploaded.
check_data_is_uploaded = False

# If enabled, generate a hash file for each data file and rename the original
# data file with the extension '.bkp'.
# If disabled, revert the previous step using the .bkp files (if any).
convert_to_hashfiles = True

delete_bkp = True

# If enabled, replace 'add_test' with 'ExternalData_add_test'.
replace_add_test = True

# Map of hashing algos to consider when creating "content links"
hash_functions = {
    'md5': hashlib.md5,
    #'sha256': hashlib.sha256,
    #'sha512': hashlib.sha512,
}

# List of data files to exclude
filenames_to_exclude = ['Brain_slice.nrrd']

# List of data file extensions that should NOT be considered
extensions_to_exclude = ['.tfm', '.txt', '.bkp', '.mrml', '.fcsv'] + [".%s" % algo for algo in hash_functions.keys()]

#---------------------------------------------------------------------------
# Initialize global vars
if midas_update or check_data_is_uploaded:
    communicator = pydas.core.Communicator(midas_url)

    def token(email, apikey):
        return communicator.login_with_api_key(email, apikey)

    midas_data_folders = communicator.folder_children(token(email, apikey), midas_data_folder_id)
    modulesIndex = midasdata._getFolderIndex(midas_data_folders, "Modules")
    modulesID = midasdata._getIDfromIndex(midas_data_folders, "folder", modulesIndex)
    availableModules = communicator.folder_children(token(email, apikey), modulesID)
    availableModulesFolders = availableModules["folders"]

moduleNames = []

MODULE_INPUT_SUBDIR = 'Data/Input'
MODULE_BASELINE_SUBDIR = 'Data/Baseline'

#---------------------------------------------------------------------------
def get_module_data_directories(cli_src_directory):
    data_directories = []
    for root, dirnames, filenames in os.walk(cli_src_directory):
        if root.endswith(MODULE_INPUT_SUBDIR):
            data_directories.append(root)
        if root.endswith(MODULE_BASELINE_SUBDIR):
            data_directories.append(root)
    return sorted(data_directories)

#---------------------------------------------------------------------------
def check_module_test_data_are_uploaded(cli_src_directory):
    # Map of moduleName to the filenames that need to be uploaded
    modules_missing_data = {}

    # Collect data directories
    data_directories = get_module_data_directories(cli_src_directory)

    for data_directory in data_directories:
        moduleName = data_directory.split('/')[-3]

        # Skip 'ExtractSkeleton' because it has already been updated
        #if moduleName == 'ExtractSkeleton':
        #    continue

        # Check if existing data files have been uploaded to Midas
        for filename in os.listdir(data_directory):
            filepath = os.path.join(data_directory, filename)
            if os.path.splitext(filename)[1] in extensions_to_exclude:
                #print("[%s] %s [skipping upload: extension excluded]" % (moduleName, filepath))
                continue
            digest = md5(filepath)
            uploaded = has_bitstream(filename, digest)
            print("[%s] %s => %s [uploaded: %s]" % (moduleName, filepath, digest, uploaded))
            if not uploaded:
                if moduleName not in modules_missing_data:
                    modules_missing_data[moduleName] = []
                modules_missing_data[moduleName].append(filepath)

    return modules_missing_data

#---------------------------------------------------------------------------
def has_folder_property(cmakelist):
    """CMakeLists.txt of CLIs are expected to have a line like this one::

        set_target_properties(${CLP}Data PROPERTIES FOLDER ${${CLP}_TARGETS_FOLDER})
    """
    has_property = False
    with open(cmakelist, 'r') as myfile:
        lines = myfile.readlines()
        has_property = "PROPERTIES FOLDER" in "".join(lines)
    if not has_property:
        print("=> [folder property missing]")
    else:
        print("=> [folder property ok]")
    return has_property
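
#---------------------------------------------------------------------------
# Usage sketch for the helpers above (output depends on the checkout; the
# module shown is only an example):
#
#   for data_dir in get_module_data_directories(cli_src_directory):
#       print(data_dir)
#   # .../Modules/CLI/ExtractSkeleton/Data/Baseline
#   # .../Modules/CLI/ExtractSkeleton/Data/Input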

#---------------------------------------------------------------------------
def add_external_data_target(cmakelist):
    # Should the footer be appended ?
    footer_already_appended = False
    # Should a line be added between the end of the file and the footer ?
    lastline_empty = False
    with open(cmakelist, 'r') as myfile:
        lines = myfile.readlines()
        footer_already_appended = 'ExternalData_add_target'.lower() in "".join(lines).lower()
        if lines[-1] == "\n":
            lastline_empty = True
    # Append footer
    if not footer_already_appended:
        with open(cmakelist, "a") as myfile:
            if not lastline_empty:
                myfile.write("\n")
            myfile.write("""#-----------------------------------------------------------------------------
if(${SEM_DATA_MANAGEMENT_TARGET} STREQUAL ${CLP}Data)
  ExternalData_add_target(${CLP}Data)
endif()
""")
        print("=> [footer ok] (lastline_empty: %s)" % (lastline_empty))
    else:
        print("=> [footer skip] (already appended)")

#---------------------------------------------------------------------------
def get_line(cmakelist, regex, relative_line_index=0):
    """Return the line matching the provided regex.

    Setting relative_line_index to a negative (or positive) value allows
    getting the line before (or after) the matching one.
    """
    p = re.compile(regex)
    with open(cmakelist, 'r') as myfile:
        lines = myfile.readlines()
        for index, line in enumerate(lines):
            if p.match(line):
                return lines[index + relative_line_index].strip()
    return None

#---------------------------------------------------------------------------
def has_line(cmakelist, regex):
    return get_line(cmakelist, regex) is not None

#---------------------------------------------------------------------------
def remove_matching_lines(cmakelist, regex):
    p = re.compile(regex)
    updated_lines = []
    removed = 0
    with open(cmakelist, 'r') as myfile:
        lines = myfile.readlines()
        for line in lines:
            if not p.match(line):
                updated_lines.append(line)
            else:
                removed = removed + 1
    with open(cmakelist, "w") as myfile:
        myfile.writelines(updated_lines)
    return removed > 0

#---------------------------------------------------------------------------
def add_line_after(cmakelist, regex, line_to_add, before=True):
    # With before=True the original line is written first, so the new line is
    # added after the match; add_line_before() flips this.
    p = re.compile(regex)
    updated_lines = []
    added = 0
    with open(cmakelist, 'r') as myfile:
        lines = myfile.readlines()
        for line in lines:
            if before:
                updated_lines.append(line)
            if p.match(line):
                updated_lines.append(line_to_add + '\n')
                added = added + 1
            if not before:
                updated_lines.append(line)
    if added > 1:
        raise Exception("File '%s' is expected to have only one line matching '%s'" % (cmakelist, regex))
    with open(cmakelist, "w") as myfile:
        myfile.writelines(updated_lines)
    return added > 0

#---------------------------------------------------------------------------
def add_line_before(cmakelist, regex, line_to_add):
    return add_line_after(cmakelist, regex, line_to_add, before=False)

#---------------------------------------------------------------------------
# Datafile helper functions
#---------------------------------------------------------------------------
def md5(fname):
    return hashfile(fname, hashlib.md5)

#---------------------------------------------------------------------------
def hashfile(fname, hashfunc):
    hash = hashfunc()
    # Read in binary mode so the digest matches the on-disk bytes.
    with open(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash.update(chunk)
    return hash.hexdigest()

#---------------------------------------------------------------------------
def datafile_regex(varname):
    return ur'(\${' + varname + ur'}\/[\${}\w\-\_]+\.[\w\d]+(\.[\w\d]+)?)'
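
#---------------------------------------------------------------------------
# Quick sanity check of datafile_regex() against a representative CMake line
# (the test line is made up for illustration):
_example_line = '  ModuleEntryPoint --compare ${BASELINE}/${CLP}Test.mha ${TEMP}/${CLP}Test.mha'
for _m in re.finditer(datafile_regex('BASELINE'), _example_line):
    print("matched: %s" % _m.group(1))  # -> ${BASELINE}/${CLP}Test.mha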

#---------------------------------------------------------------------------
def relocate_test_data(line, varname, global_test_data_dir, module_input_dir):
    if "${%s}" % varname not in line:
        return line
    p = re.compile(datafile_regex(varname))
    for m in re.finditer(p, line):
        g = m.group(1)
        data_filepath = g.replace("${%s}" % varname, global_test_data_dir)
        if not os.path.isfile(data_filepath) and os.path.isdir(os.path.dirname(data_filepath)):
            # We assume the variable is of the form /path/to/brainSlice${pixeltype}.mha
            # Generate a wild-card expression and glob corresponding files
            cmake_var_ref_regex = r"\$\{\w+\}"
            p = re.compile(cmake_var_ref_regex)
            glob_expr = p.sub("*", data_filepath)
            print("\n=> [using glob_expr] %s" % glob_expr)
            data_filepaths = glob.glob(glob_expr)
        else:
            print("\n=> [using data_filepath] %s" % data_filepath)
            data_filepaths = [data_filepath]
        for data_filepath in data_filepaths:
            data_filename = os.path.basename(data_filepath)
            dest_data_filepath = os.path.join(module_input_dir, data_filename)
            if os.path.isfile(data_filepath):
                print("\n  => [data_filepath] %s" % data_filepath)
                print("  => [dest_data_filepath] %s" % dest_data_filepath)
                print("  => [global_test_data_dir -> module_input_dir] %s" % data_filename)
                dest_data_path = os.path.dirname(dest_data_filepath)
                if not os.path.exists(dest_data_path):
                    print("  => %s [created]" % dest_data_path)
                    os.makedirs(dest_data_path)
                shutil.copy(data_filepath, dest_data_filepath)
                for data_dependency_filepath in datafile_dependencies(data_filepath):
                    shutil.copy(data_dependency_filepath, dest_data_path)
                    print("  => %s [copied]" % os.path.basename(data_dependency_filepath))
                if moduleName not in missing_midas_module_input_data:
                    missing_midas_module_input_data[moduleName] = []
                missing_midas_module_input_data[moduleName].append(dest_data_filepath)
                if data_filepath not in global_datafiles_candidate_for_removal:
                    global_datafiles_candidate_for_removal.append(data_filepath)
            else:
                print("\n  => [data_filepath:nonexistent] %s" % data_filepath)
                print("  -> original line: %s" % line)
    return line.replace("${%s}" % varname, '${INPUT}')

#---------------------------------------------------------------------------
def datafile_dependency_is_gz(filename):
    with open(filename, "r") as _file:
        return '.gz' in "".join(_file.readlines())
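
#---------------------------------------------------------------------------
# Sanity sketch for datafile_dependency_is_gz(): detached NRRD headers
# (.nhdr) are plain text and name their data file, so a '.gz' occurrence in
# the header indicates a compressed '.raw.gz' companion. The header content
# below is a minimal hypothetical example:
import tempfile
_tmp_nhdr = os.path.join(tempfile.mkdtemp(), 'example.nhdr')
with open(_tmp_nhdr, 'w') as _f:
    _f.write('NRRD0004\nencoding: gzip\ndata file: example.raw.gz\n')
print(datafile_dependency_is_gz(_tmp_nhdr))  # -> True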

#---------------------------------------------------------------------------
def datafile_is_serie(data_filepath):
    base_dir = os.path.dirname(data_filepath)
    data_filename = os.path.basename(data_filepath)
    (basename, ext) = os.path.splitext(data_filename)
    # This implementation simply checks if the file '<basename>.1<ext>' exists.
    # For a complete implementation, see "ExternalData_arg_series()" in the
    # ExternalData CMake module.
    serie_item = os.path.join(base_dir, basename + '.1' + ext)
    return os.path.isfile(serie_item)

#---------------------------------------------------------------------------
def datafile_dependencies(data_filepath):
    dependencies = []
    (basename, ext) = os.path.splitext(data_filepath)
    if ext == '.nhdr':
        if datafile_dependency_is_gz(data_filepath):
            return [basename + '.raw.gz']
        else:
            return [basename + '.raw']
    elif ext == '.hdr':
        return [basename + '.img']
    # XXX Add support for file series
    return dependencies

#---------------------------------------------------------------------------
# Pydas helper functions
#---------------------------------------------------------------------------
def has_bitstream(name, checksum):
    items = communicator.search_item_by_name(name)
    for item in items:
        item_id = item['item_id']
        item_data = communicator.item_get(token(email, apikey), item_id)
        if len(item_data['revisions']) > 0:
            if item_data['revisions'][-1]['bitstreams'][0]['checksum'] == checksum:
                return True
    return False

#---------------------------------------------------------------------------
def folder_children(folder_id, children_type="folder"):
    folders = communicator.folder_children(token(email, apikey), folder_id)
    return {folder['name']: folder[children_type + '_id'] for folder in folders[children_type + 's']}

#---------------------------------------------------------------------------
def get_or_create_module_test_data_directory(module_name, test_data_type):
    # Get the module folder id, or create it if needed
    try:
        module_folder_id = midas_module_names[module_name]
        #print("=> %s [Found module_folder_id: %s]" % (module_name, module_folder_id))
    except KeyError:
        module_folder = communicator.create_folder(token(email, apikey), module_name, modules_folder_id)
        module_folder_id = module_folder['folder_id']
        #print("=> %s [Created module_folder_id: %s]" % (module_name, module_folder_id))

    # Get the Nightly folder id, or create it if needed
    versions = folder_children(module_folder_id)
    try:
        nightly_folder_id = versions['Nightly']
        #print("=> %s [Found nightly_folder_id: %s]" % (module_name, nightly_folder_id))
    except KeyError:
        nightly_folder = communicator.create_folder(token(email, apikey), 'Nightly', module_folder_id)
        nightly_folder_id = nightly_folder['folder_id']
        #print("=> %s [Created nightly_folder_id: %s]" % (module_name, nightly_folder_id))

    # Get the Testing folder id, or create it if needed
    testings = folder_children(nightly_folder_id)
    try:
        testing_folder_id = testings['Testing']
        #print("=> %s [Found testing_folder_id: %s]" % (module_name, testing_folder_id))
    except KeyError:
        testing_folder = communicator.create_folder(token(email, apikey), 'Testing', nightly_folder_id)
        testing_folder_id = testing_folder['folder_id']
        #print("=> %s [Created testing_folder_id: %s]" % (module_name, testing_folder_id))

    # Get the test_data_type (Input or Baseline) folder id, or create it if needed
    testdata_types = folder_children(testing_folder_id)
    try:
        test_data_type_folder_id = testdata_types[test_data_type]
        #print("=> %s [Found test_data_type_folder_id: %s / %s]" % (module_name, test_data_type_folder_id, test_data_type))
    except KeyError:
        test_data_type_folder = communicator.create_folder(token(email, apikey), test_data_type, testing_folder_id)
        test_data_type_folder_id = test_data_type_folder['folder_id']
        #print("=> %s [Created test_data_type_folder_id: %s / %s]" % (module_name, test_data_type_folder_id, test_data_type))

    return test_data_type_folder_id
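
#---------------------------------------------------------------------------
# Usage sketch (requires valid Midas credentials, midas_update=True and the
# upload_item() helper defined below; module name and path are illustrative):
#
#   input_folder_id = get_or_create_module_test_data_directory('ExtractSkeleton', 'Input')
#   upload_item(input_folder_id, '/path/to/Modules/CLI/ExtractSkeleton/Data/Input/foo.nrrd')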

#---------------------------------------------------------------------------
def upload_revision(item_id, filepath):
    checksum = md5(filepath)
    filename = os.path.basename(filepath)
    print("=> md5(%s): %s" % (filename, checksum))
    upload_token = communicator.generate_upload_token(
        token(email, apikey), item_id, filename,
        checksum=checksum, create_additional_revision=True)
    if upload_token:
        communicator.perform_upload(
            upload_token, filename,
            item_id=item_id, filepath=filepath, create_additional_revision=True)
    else:
        print("=> skipping upload: using reference to existing bitstream")
    print("Done")

#---------------------------------------------------------------------------
def upload_item(folder_id, filepath):
    filename = os.path.basename(filepath)
    # Get the existing item if one exists with the same name
    item_names = folder_children(folder_id, children_type='item')
    try:
        item_id = item_names[filename]
        print("=> found item: %s" % (item_id))
    except KeyError:
        item = communicator.create_item(token(email, apikey), filename, folder_id)
        item_id = item['item_id']
        print("=> item created: %s" % (item_id))
    upload_revision(item_id, filepath)

#---------------------------------------------------------------------------
# Map associating the module folders that should be created on Midas with
# the data files that should be uploaded
missing_midas_module_input_data = {}

#---------------------------------------------------------------------------
# List of modules missing the "PROPERTIES FOLDER" setting.
modules_missing_folder_property = []

#---------------------------------------------------------------------------
print("\n" + ("*" * 80) + "\nLocal: Process CMakeLists.txt\n" + ("*" * 80))

for root, dirnames, filenames in os.walk(cli_src_directory):
    if not root.endswith('/Testing'):
        continue
    testing_dir = root
    cmakelist = os.path.join(testing_dir, 'CMakeLists.txt')
    if not os.path.exists(cmakelist):
        continue

    # Set module to process only one
    # e.g. AddScalarVolumes CheckerBoardFilter MergeModels ModelMaker
    module = ""
    if module and cmakelist != "%s/%s/Testing/CMakeLists.txt" % (cli_src_directory, module):
        continue

    testing_dir = os.path.dirname(cmakelist)

    # Get moduleName
    moduleName = os.path.basename(os.path.dirname(testing_dir))
    if moduleName not in moduleNames:
        moduleNames.append(moduleName)

    print("\n%s" % (cmakelist.replace(cli_src_directory + '/', '')))

    if not has_folder_property(cmakelist):
        modules_missing_folder_property.append(moduleName)

    add_external_data_target(cmakelist)

    tmp_cmakelist = cmakelist + ".tmp"
    with open(tmp_cmakelist, "w") as fout:

        # Get baseline_dir/input_dir assuming all CLIs are organized similarly.
        baseline_dir = os.path.join(testing_dir, '..', MODULE_BASELINE_SUBDIR)
        if not os.path.isdir(baseline_dir):
            baseline_dir = None
        input_dir = os.path.join(testing_dir, '..', MODULE_INPUT_SUBDIR)

        with open(cmakelist, "r") as fin:
            for line in fin:

                if replace_add_test:
                    pos = line.find('add_test(')
                    if pos >= 0 and 'ExternalData_add_test' not in line:
                        #print("pos:", pos, line[0:pos])
                        line = line.replace('add_test(', 'ExternalData_add_test(${SEM_DATA_MANAGEMENT_TARGET}\n' + line[0:pos] + ' ')
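
                # Illustrative before/after of the rewrite above (test name
                # and arguments are hypothetical):
                #
                #   add_test(NAME ${CLP}Test COMMAND ${SEM_LAUNCH_COMMAND} ...)
                #
                # becomes:
                #
                #   ExternalData_add_test(${SEM_DATA_MANAGEMENT_TARGET}
                #     NAME ${CLP}Test COMMAND ${SEM_LAUNCH_COMMAND} ...)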

                # Identify all test data that should be copied into the module Baseline or Input directory
                line = relocate_test_data(line, 'TEST_DATA', global_test_data_dir, input_dir)
                line = relocate_test_data(line, 'MRML_TEST_DATA', global_mrml_test_data_dir, input_dir)

                # Add 'DATA{}' where it applies. For example, it will:
                # - look for lines containing occurrences of "${BASELINE}"
                # - extract the associated path: ${BASELINE}/path/to/foo.nrrd
                # - replace it with DATA{${BASELINE}/path/to/foo.nrrd} if not already done
                # - this can be skipped by adding '.nrrd' to 'extensions_to_exclude'
                for varname in ['BASELINE', 'INPUT']:
                    if "${%s}" % varname in line:
                        p = re.compile(datafile_regex(varname))
                        for m in re.finditer(p, line):
                            g = m.group(1)
                            filename = os.path.basename(g)
                            if filename not in filenames_to_exclude \
                                    and os.path.splitext(filename)[1] not in extensions_to_exclude:
                                for char in [" ", "(", "="]:
                                    line = line.replace(char + g, char + 'DATA{' + g + '}')
                            else:
                                print("=> skipping addition of DATA{}: %s" % m.group(0))

                # Update DATA{} associations.
                # - .nhdr will be associated with either '.raw' or '.raw.gz'
                # - .hdr will be associated with '.img'
                if 'DATA{' in line:
                    p = re.compile(ur'DATA{([{}\w\_\-\/\$\.,]*)}')
                    for m in re.finditer(p, line):
                        for g in m.groups():
                            if g.endswith('.nhdr'):
                                data_filepath = g
                                data_filepath = data_filepath.replace('${CLP}', moduleName)
                                if baseline_dir:
                                    data_filepath = data_filepath.replace('${BASELINE}', baseline_dir)
                                data_filepath = data_filepath.replace('${INPUT}', input_dir)
                                data_basename = os.path.splitext(os.path.basename(data_filepath))[0]
                                try:
                                    # Append .raw
                                    updated = g + ',' + data_basename + '.raw'
                                    if datafile_dependency_is_gz(data_filepath):
                                        updated = updated + '.gz'
                                    #print("...... %s => %s" % (g, updated))
                                    line = line.replace(g, updated)
                                    print("=> [data_association] (%s)" % (updated))
                                except IOError as e:
                                    print("=> [data_association] %s" % (e))
                            if g.endswith('.raw'):
                                # Fix an incorrect association: associate the nhdr with
                                # '.raw.gz' instead of '.raw' if it applies, keeping the
                                # path prefix of the reference.
                                data_filepath = g.split(',')[0]
                                data_filepath = data_filepath.replace('${CLP}', moduleName)
                                if baseline_dir:
                                    data_filepath = data_filepath.replace('${BASELINE}', baseline_dir)
                                data_filepath = data_filepath.replace('${INPUT}', input_dir)
                                if os.path.isfile(data_filepath) and datafile_dependency_is_gz(data_filepath):
                                    updated = g + '.gz'
                                    line = line.replace(g, updated)
                                    print("=> [data_association] (%s)" % (updated))
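
                            # Association syntax reminder (file names are
                            # hypothetical): DATA{${INPUT}/img.nhdr,img.raw.gz}
                            # tells ExternalData to also fetch the companion
                            # file referenced by the header.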

                            if g.endswith('.hdr'):
                                data_filename = os.path.basename(g)
                                base = os.path.splitext(data_filename)[0]
                                # Append .img
                                updated = g + ',' + base + '.img'
                                #print("...... %s => %s" % (g, updated))
                                line = line.replace(g, updated)
                                print("=> [data_association] (%s)" % (updated))

                            # Check if the data file is associated with a series
                            data_filename = g.replace('${CLP}', moduleName)
                            if baseline_dir:
                                data_filename = data_filename.replace('${BASELINE}', baseline_dir)
                            if input_dir:
                                data_filename = data_filename.replace('${INPUT}', input_dir)
                            # Check if the file belongs to a series
                            if datafile_is_serie(data_filename):
                                updated = g + ',:'
                                line = line.replace(g, updated)
                                print("=> [data_association] (%s)" % (updated))

                fout.write(line)

    # Remove the `set(TEST_DATA ...)` and `set(MRML_TEST_DATA ...)` lines
    remove_matching_lines(tmp_cmakelist, ur'^set\(TEST_DATA .+\)$')
    remove_matching_lines(tmp_cmakelist, ur'^set\(MRML_TEST_DATA .+\)$')

    # Add a `set(INPUT ...)` line after the `set(BASELINE ...)` one if it
    # exists, otherwise before the `set(CLP ...)` one
    if not has_line(tmp_cmakelist, ur'^set\(INPUT .+\)$'):
        if has_line(tmp_cmakelist, ur'^set\(BASELINE .+\)$'):
            add_line_after(tmp_cmakelist, ur'^set\(BASELINE .+\)$', 'set(INPUT ${CMAKE_CURRENT_SOURCE_DIR}/../Data/Input)')
        else:
            add_line_before(tmp_cmakelist, ur'^set\(CLP .+\)$', 'set(INPUT ${CMAKE_CURRENT_SOURCE_DIR}/../Data/Input)')

    # Add an empty line before `set(CLP ...)`
    if get_line(tmp_cmakelist, ur'^set\(CLP .+\)$', -1) != '':
        add_line_before(tmp_cmakelist, ur'^set\(CLP .+\)$', '')

    # Add the setting of SEM_DATA_MANAGEMENT_TARGET
    if not has_line(tmp_cmakelist, ur'^  set\(SEM_DATA_MANAGEMENT_TARGET \$\{CLP\}Data\)$'):
        add_line_after(
            tmp_cmakelist,
            ur'^set\(CLP .+\)$',
            """
if(NOT DEFINED SEM_DATA_MANAGEMENT_TARGET)
  set(SEM_DATA_MANAGEMENT_TARGET ${CLP}Data)
endif()"""
        )

    shutil.move(cmakelist + ".tmp", cmakelist)

    #break

# In[2]:

#---------------------------------------------------------------------------
# Create module directories on Midas if needed, and upload data
modules_folder_id = 310

if midas_update:
    midas_module_names = folder_children(modules_folder_id)

    print("\n" + ("*" * 80) + "\nMidas: Create module directories and upload missing input test data\n" + ("*" * 80))
    for module_name in missing_midas_module_input_data:
        if not missing_midas_module_input_data[module_name]:
            continue

        # Cache ids
        folder_ids = {
            "Baseline": None,
            "Input": None,
        }

        # Upload test data
        for data_filepath in missing_midas_module_input_data[module_name]:
            sub_directory = os.path.dirname(data_filepath).split('/')[-1]
            assert sub_directory in folder_ids
            if folder_ids[sub_directory] is None:
                folder_ids[sub_directory] = get_or_create_module_test_data_directory(module_name, sub_directory)
            assert folder_ids[sub_directory] is not None
            print("%s [folder_id:%s]" % (module_name, folder_ids[sub_directory]))
            print("%s uploading %s" % (module_name, data_filepath))
            upload_item(folder_ids[sub_directory], data_filepath)

# In[3]:

#---------------------------------------------------------------------------
# Files that could potentially be removed from the source tree
print("\n" + ("*" * 80) + "\nMidas: Potential candidates for removal\n" + ("*" * 80))

# Get all the lists contained in the map, flatten them, and remove duplicates
#_list = [v for (k, v) in missing_midas_module_input_data.iteritems()]
#global_datafiles_candidate_for_removal = list(set(itertools.chain.from_iterable(_list)))

for datafile in global_datafiles_candidate_for_removal:
    print(datafile)

#---------------------------------------------------------------------------
print("\n" + ("*" * 80) + "\nLocal: Convert data files to md5 hash files if it applies\n" + ("*" * 80))
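
#---------------------------------------------------------------------------
# Minimal self-contained illustration of what the loop below produces for a
# single (hypothetical) data file: the file is hashed into a one-line
# '<name>.md5' content link and the original is renamed to '<name>.bkp'
# (then deleted when delete_bkp is enabled).
import tempfile
_demo_file = os.path.join(tempfile.mkdtemp(), 'foo.nrrd')
with open(_demo_file, 'wb') as _f:
    _f.write(b'dummy payload')
with open(_demo_file + '.md5', 'w') as _f:
    _f.write(hashfile(_demo_file, hashlib.md5) + '\n')
shutil.move(_demo_file, _demo_file + '.bkp')
print(sorted(os.listdir(os.path.dirname(_demo_file))))  # -> ['foo.nrrd.bkp', 'foo.nrrd.md5']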

# Convert data files to hash files if it applies
for data_directory in get_module_data_directories(cli_src_directory):
    for filename in os.listdir(data_directory):
        filepath = os.path.join(data_directory, filename)
        basename = os.path.basename(filename)
        print("\n%s" % filename)
        if basename in filenames_to_exclude:
            print("\n%s skipping" % filename)
            continue
        if convert_to_hashfiles:
            if os.path.splitext(filepath)[1] not in extensions_to_exclude:
                for hash_algo, hash_func in hash_functions.items():
                    checksum = hashfile(filepath, hash_func)
                    with open(filepath + "." + hash_algo, "w") as myfile:
                        myfile.write(checksum + "\n")
                    print("=> %s.%s [done]" % (basename, hash_algo))
                shutil.move(filepath, filepath + '.bkp')
                print("=> %s.bkp [done]" % basename)
                filepath = filepath + '.bkp'
                basename = os.path.basename(filename)
            if delete_bkp and filepath.endswith('.bkp'):
                os.remove(filepath)
                print("=> %s [removed]" % basename)
            print("=> [ok]")
        elif os.path.splitext(filepath)[1] == '.bkp':
            original_filename = filepath.replace('.bkp', '')
            print("\n%s restoring" % original_filename)
            shutil.move(filepath, original_filename)
            print("=> %s [restored]" % original_filename)
        else:
            print("=> [nothing-to-do]")

#---------------------------------------------------------------------------
if check_data_is_uploaded:
    check_module_test_data_are_uploaded(cli_src_directory)

# Display the commands allowing to run the associated tests
print("\n" + ("*" * 80) + "\nLocal: CTest commands to run associated tests\n" + ("*" * 80))
for moduleName in moduleNames:
    print("ctest -R %s" % moduleName)
print("\nctest -R '%s'" % "|".join(moduleNames))

# Display the modules missing the call setting the "FOLDER" property on the test executable
print("\n" + ("*" * 80) + "\nLocal: CLI testing targets missing FOLDER property\n" + ("*" * 80))
for moduleName in modules_missing_folder_property:
    print("  %s" % moduleName)

# In[4]:

print("\n" + ("*" * 80) + "\nLocal: CTest commands to run associated tests\n" + ("*" * 80))
print("\nctest -R '%s'" % "|".join(moduleNames))