diff --git a/.lldbinit.in b/.lldbinit.in new file mode 100644 index 0000000000000000000000000000000000000000..fd3bec78930ddd7276f4ad62649bf2750ac30281 --- /dev/null +++ b/.lldbinit.in @@ -0,0 +1,3 @@ +command regex dump 's/(.+)/expression -O -l c++ -- ((llvm::Value*) %1 )->dump()/' +command alias d dump +command script import ${CMAKE_SOURCE_DIR}/lldb/ValueFormatter.py \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 3f6a113718aadb3684447fea962effe41921f2ed..ef3b289c7d673b823cdc2b4a70ab75dd8a04846c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,26 +1,334 @@ -# If we don't need RTTI or EH, there's no reason to export anything -# from the hello plugin. -#if( NOT LLVM_REQUIRES_RTTI ) -# if( NOT LLVM_REQUIRES_EH ) -# set(LLVM_EXPORTED_SYMBOL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/Hello.exports) -# endif() -#endif() - -if(WIN32 OR CYGWIN) - set(LLVM_LINK_COMPONENTS Core Support) +cmake_minimum_required(VERSION 3.4.3) + +project(PAIN + VERSION 1.0.0 + LANGUAGES C CXX +) + +set(CMAKE_CXX_STANDARD 17) + +set (PROJECT_DESCRIPTION "Implements an LLVM analysis pass using abstract interpretation.") +set (PROJECT_HOMEPAGE_URL "https://versioncontrolseidl.in.tum.de/petter/llvm-abstractinterpretation") + +if (NOT PATH_TO_LLVM) + message(FATAL_ERROR " + CMake is supposed to be called with PATH_TO_LLVM pointing to + a precompiled version of LLVM or to the source code of LLVM + Examples: + cmake -G \"${CMAKE_GENERATOR}\" -DPATH_TO_LLVM=/opt/llvm-9.0.1 ${CMAKE_SOURCE_DIR} + cmake -G \"${CMAKE_GENERATOR}\" -DPATH_TO_LLVM=/llvm-project/llvm ${CMAKE_SOURCE_DIR} +") endif() -# For older LLVM < 8.0 replace first line with this -# add_llvm_loadable_module(llvm-pain -add_llvm_library(llvm-pain MODULE +if (NOT IS_ABSOLUTE ${PATH_TO_LLVM}) + # Convert relative path to absolute path + get_filename_component(PATH_TO_LLVM + "${PATH_TO_LLVM}" REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") +endif() + +set (BUILD_AGAINST_PRECOMPILED_LLVM TRUE) +if (EXISTS ${PATH_TO_LLVM}/CMakeLists.txt) + set (BUILD_AGAINST_PRECOMPILED_LLVM FALSE) +endif() + +# This enables assertions for Release builds. +# https://stackoverflow.com/questions/22140520/how-to-enable-assert-in-cmake-release-mode +string(REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") + +if (${BUILD_AGAINST_PRECOMPILED_LLVM}) + set (search_paths + ${PATH_TO_LLVM} + ${PATH_TO_LLVM}/lib/cmake + ${PATH_TO_LLVM}/lib/cmake/llvm + ${PATH_TO_LLVM}/lib/cmake/clang + ${PATH_TO_LLVM}/share/clang/cmake/ + ${PATH_TO_LLVM}/share/llvm/cmake/ + ) + + find_package(LLVM REQUIRED CONFIG PATHS ${search_paths} NO_DEFAULT_PATH) + find_package(Clang REQUIRED CONFIG PATHS ${search_paths} NO_DEFAULT_PATH) + + list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}") + +else() + + set (LLVM_ENABLE_PROJECTS "clang" CACHE BOOL "Build only Clang when building against monorepo" FORCE) + set (LLVM_TARGETS_TO_BUILD "host" CACHE STRING "Only build targets for host architecture" FORCE) + #set (BUILD_SHARED_LIBS "On" CACHE BOOL "Link LLVM libraries dynamically" FORCE) + + add_subdirectory(${PATH_TO_LLVM} llvm-build) + + if (NOT TARGET clangTooling) + message(FATAL_ERROR " + Cannot find clangTooling target. Did you forget to clone clang sources?
+ Clean CMake cache and make sure they are available at: + ${PATH_TO_LLVM}/tools/clang") + endif() + + # Normally, include paths are provided by LLVMConfig.cmake + # In this case we can 'steal' them from real targets + get_target_property(llvm_support_includes LLVMSupport INCLUDE_DIRECTORIES) + get_target_property(clang_tooling_includes clangTooling INCLUDE_DIRECTORIES) + set(LLVM_INCLUDE_DIRS ${llvm_support_includes} ${clang_tooling_includes}) + list(REMOVE_DUPLICATES LLVM_INCLUDE_DIRS) + + # Manually include the llvm CMake modules + list(APPEND CMAKE_MODULE_PATH + "${PATH_TO_LLVM}/cmake" + "${PATH_TO_LLVM}/cmake/modules" + ) + + set(LLVM_MAIN_SRC_DIR ${PATH_TO_LLVM}) + +endif() + +include(LLVM-Config) +include(HandleLLVMOptions) +include(AddLLVM) + +if ("LLVM" IN_LIST LLVM_AVAILABLE_LIBS) + set (LLVM_AVAILABLE_LIBS + LLVM + ) +else() + set (LLVM_AVAILABLE_LIBS + LLVMSupport + LLVMCore + LLVMAnalysis + ) +endif() + +set(PAIN_SOURCES src/fixpoint.cpp - src/fixpoint.h + src/fixpoint_widening.cpp src/value_set.cpp - src/value_set.h src/simple_interval.cpp + src/normalized_conjunction.cpp + src/linear_equality.cpp + src/linear_subspace.cpp +) + +set(PAIN_HEADERS + src/fixpoint.h + src/value_set.h src/simple_interval.h + src/normalized_conjunction.h + src/linear_equality.h + src/general.h + src/global.h + src/abstract_state.h + src/hash_utils.h + src/linear_subspace.h + src/simple_matrix.h + src/sparse_matrix.h +) + +include_directories(${LLVM_INCLUDE_DIRS}) + +add_llvm_library(llvm-pain MODULE + ${PAIN_SOURCES} + ${PAIN_HEADERS} DEPENDS intrinsics_gen + irgen PLUGIN_TOOL opt +) + +# +# Tests +# + +add_llvm_executable(simple_interval_test + test/simple_interval_test.cpp + ${PAIN_HEADERS} + ${PAIN_SOURCES} + DEPENDS + irgen + intrinsics_gen +) + +target_link_libraries(simple_interval_test + PRIVATE ${LLVM_AVAILABLE_LIBS} +) + +add_llvm_executable(normalized_conjunction_test + test/normalized_conjunction_test.cpp + ${PAIN_HEADERS} + ${PAIN_SOURCES} + DEPENDS + irgen + intrinsics_gen +) + +target_link_libraries(normalized_conjunction_test + PRIVATE ${LLVM_AVAILABLE_LIBS} +) + +add_llvm_executable(linear_subspace_test + test/linear_subspace_test.cpp + ${PAIN_HEADERS} + ${PAIN_SOURCES} + DEPENDS + irgen + intrinsics_gen +) + +target_link_libraries(linear_subspace_test + PRIVATE ${LLVM_AVAILABLE_LIBS} +) + +add_llvm_executable(simple_matrix_test + test/simple_matrix_test.cpp + ${PAIN_HEADERS} + ${PAIN_SOURCES} +) + +target_link_libraries(simple_matrix_test + PRIVATE ${LLVM_AVAILABLE_LIBS} +) + +add_llvm_executable(sparse_matrix_test + test/sparse_matrix_test.cpp + ${PAIN_HEADERS} + ${PAIN_SOURCES} +) + +target_link_libraries(sparse_matrix_test + PRIVATE ${LLVM_AVAILABLE_LIBS} +) + +enable_testing() + +add_test(NAME intervalAnalysisTest + COMMAND opt --load $<TARGET_FILE:llvm-pain> --painpass -S ${CMAKE_SOURCE_DIR}/output/add-1.ll +) + +add_test(NAME simpleIntervalTest + COMMAND simple_interval_test +) + +add_test(NAME normalizedConjunctionTest + COMMAND normalized_conjunction_test +) + +add_test(NAME linearSubspaceTest + COMMAND linear_subspace_test +) + +add_test(NAME simpleMatrixTest + COMMAND simple_matrix_test +) + +add_test(NAME sparseMatrixTest + COMMAND sparse_matrix_test +) + +# +# Samples +# + +set(SAMPLES + add-1-float + add-1 + add-2 + add-3 + add-4 + basic_function + branching + cmp-two-variables-1 + cmp-two-variables-2 + euler-48 + euler + example + for-loop-1 + for + func-test-1 + func-test-2 + func-test-3 + func-test-4 + func-test-5 + func-test-for + func-test-rec-endless + func-test-rec + func-test-switch +
gcd + goto + if-and + if-multiple-of-4 + if-then-else-2 + if-then-else-complicated + if-then-else-two-constraints + if-then-else + multiple-functions + ops + switch-2 + switch-3 + switch-two-labels + switch + while-1 + while-2 + while-bigger-steps + while-neg + example-1 + A + B + C + D +) + +# Older CMake versions do not support list transformations +macro(list_transform_prepend in prefix) + foreach(f ${${in}}) + list(APPEND temp "${prefix}${f}") + endforeach() + set(${in} "${temp}") + unset(temp) +endmacro() + +macro(list_transform_append in suffix out) + foreach(f ${${in}}) + list(APPEND ${out} "${f}${suffix}") + endforeach() +endmacro() + +list_transform_append(SAMPLES ".c" SAMPLES_C) +list_transform_append(SAMPLES ".bc" SAMPLES_BC) +list_transform_append(SAMPLES ".ll" SAMPLES_LL) +list_transform_prepend(SAMPLES_LL "${CMAKE_SOURCE_DIR}/output/") +list_transform_prepend(SAMPLES_BC "${CMAKE_SOURCE_DIR}/output/") +list_transform_prepend(SAMPLES_C "${CMAKE_SOURCE_DIR}/samples/") + +add_custom_target(irgen + DEPENDS ${SAMPLES_LL} ${SAMPLES_BC} +) + +file(MAKE_DIRECTORY ${CMAKE_SOURCE_DIR}/output) + +if (APPLE) + +foreach(src ${SAMPLES}) + add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/output/${src}.ll + COMMAND clang --sysroot ${CMAKE_OSX_SYSROOT} -O0 -emit-llvm ${CMAKE_SOURCE_DIR}/samples/${src}.c -Xclang -disable-O0-optnone -c -o ${CMAKE_SOURCE_DIR}/output/${src}.bc + COMMAND opt -S -mem2reg ${CMAKE_SOURCE_DIR}/output/${src}.bc -o ${CMAKE_SOURCE_DIR}/output/${src}.ll + DEPENDS clang opt ${CMAKE_SOURCE_DIR}/samples/${src}.c + COMMENT "Generating LLVM IR for example ${src}" + ) +endforeach(src) + +else() + +foreach(src ${SAMPLES}) + add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/output/${src}.ll + COMMAND clang -O0 -emit-llvm ${CMAKE_SOURCE_DIR}/samples/${src}.c -Xclang -disable-O0-optnone -c -o ${CMAKE_SOURCE_DIR}/output/${src}.bc + COMMAND opt -S -mem2reg ${CMAKE_SOURCE_DIR}/output/${src}.bc -o ${CMAKE_SOURCE_DIR}/output/${src}.ll + DEPENDS clang opt ${CMAKE_SOURCE_DIR}/samples/${src}.c + COMMENT "Generating LLVM IR for example ${src}" ) +endforeach(src) + +endif() + +configure_file(.lldbinit.in .lldbinit) \ No newline at end of file diff --git a/README.md b/README.md index 8308436051365cf0a0abe63dae5703fb41d03bb4..96a71f108031df3504597fde6cc5205dc818529b 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,68 @@ -# Program Optimization Lab 2018 +# Program Optimization Lab 202X -Implements an LLVM analysis pass using abstract interpretation. +Implementing an LLVM analysis framework based upon the Seidl Program Optimization Lecture. ## Build -Get the LLVM source code from [here](http://releases.llvm.org/download.html). Then get clang as well, into `llvm/tools`. Create a build directory somewhere, initialise CMake, and build. For example +### Build against a system-wide installed LLVM +Install the LLVM packages from your distro's package manager, e.g. Ubuntu 20.04: - # From your llvm-9.0.0-src, or whatever the version is now - wget http://releases.llvm.org/9.0.0/llvm-9.0.0.src.tar.xz - tar xf llvm-9.0.0.src.tar.xz + # install the necessary LLVM packages + sudo apt install cmake clang libclang-10-dev llvm-10-dev + # now continue by building the project + git clone https://versioncontrolseidl.in.tum.de/petter/llvm-abstractinterpretation.git + cd llvm-abstractinterpretation + mkdir build + cd build + cmake -G "Unix Makefiles" -DPATH_TO_LLVM=/usr/lib/llvm-10 ..
+ make + +You can do this; however, the precompiled LLVM binaries come without symbol names, so debugging +might be a little harder this way. Alternatively, consider the following route: + +### Build against custom downloaded LLVM Sources +Get the LLVM source code from [here](https://releases.llvm.org/download.html). Then get clang as well, into `llvm/tools`. Create a build directory somewhere, initialise CMake, and build. For example + + # From llvm-10.0.0-src, or whatever the version is now + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/llvm-10.0.0.src.tar.xz + tar xf llvm-10.0.0.src.tar.xz # now also download clang - cd llvm-9.0.0.src/tools - wget http://releases.llvm.org/9.0.0/cfe-9.0.0.src.tar.xz - tar xf cfe-9.0.0.src.tar.xz - mv cfe-9.0.0.src clang + cd llvm-10.0.0.src/tools + wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang-10.0.0.src.tar.xz + tar xf clang-10.0.0.src.tar.xz + mv clang-10.0.0.src clang cd ../.. # now continue by building LLVM mkdir llvm_build cd llvm_build - cmake ../llvm-?.?.?-src -DLLVM_TARGETS_TO_BUILD=X86 - make -j2 - -The parallel make may run out of memory at the end. You can restart it sequentially by issuing another `make -j1`. + # important: Don't forget to restrict to X86, otherwise prepare for a day of compiling + cmake ../llvm-10.0.0-src -DLLVM_TARGETS_TO_BUILD=X86 + # 4x parallelized make, which will probably fail due to RAM consumption + make -j4 + # make -j1 in order to catch up where the parallel make aborted -Now we can initalise the repository. +On a 4-core i7-8550U with 16GB RAM, a sequential make (`make -j1`, the safer choice if RAM is tight) may take up to 3 hours. Also, the build will need at least 50GB of disk space, so be sure to have enough room... - cd .. - git clone ssh://git@github.com/PUT/THE/CORRECT/REPOSITORY/IN/HERE.git - cd PUT/THE/CORRECT/REPOSITORY/IN/HERE - python3 init.py -The script should be able to find your LLVM and clang. If it is not, you need to specify them by hand. - -At last, let us compile and run the samples. +If there are errors regarding missing header files, you probably need to rebuild llvm. - python3 run.py --make +## Author during Bachelor Thesis 2019/20 -If there are errors regarding missing header files, you probably need to rebuild llvm. +* Tim Gymnich -## Useful things +## Authors Lab Course WS 2019/20 -The `run.py` script contains everything, up to and including the kitchen sink. It can run the samples, build, run the debugger, as well as build and run the tests. Just read its help message to get all the good stuff. I want to highlight the `-n` option, which causes it to just print out the commands it would run. This is great to just copy-paste the relevant ones into your terminal (or IDE).
+* Florian Stamer +* Dmytro Yakymets -## Authors +## Authors Lab Course WS 2018/19 * Ramona Brückl * Philipp Czerner ([github](https://github.com/suyjuris/), [mail](mailto:philipp.czerner@nicze.de)) * Tim Gymnich * Thomas Frank -### Authors of previous semesters +## Authors Lab Course SS 2018 * Julian Erhard * Jakob Gottfriedsen * Peter Munch diff --git a/init.py b/init.py deleted file mode 100755 index 61010ec2aa30edc78f975b49ee043435b859e1dd..0000000000000000000000000000000000000000 --- a/init.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 - -import argparse -import os -import subprocess -import sys - -if sys.version_info[0] < 3: - print("Error: This script only supports Python 3") - sys.exit(5) - -parser = argparse.ArgumentParser(description='Setup the project. This creates the necessary symbolic links in the LLVM source code and adds the entries into the right CMakeList.txt. Also initialises the configuration for the run.py script.') -parser.add_argument('--llvm-path', help='path to the LLVM build directory, containing a file bin/opt.') -parser.add_argument('--llvm-src', help='path to the LLVM source directory, containing lib/Analysis ') -parser.add_argument('--clang-path', help='path to the clang build direcotry, containing a file bin/clang') -args = parser.parse_args() - -llvm_path = args.llvm_path -llvm_src = args.llvm_src -clang_path = args.clang_path - -project_dir = os.path.dirname(os.path.abspath(sys.argv[0])) -project_name = 'AbstractInterpretation' - -cmake = 'cmake' - -if llvm_path is None: - # Try to guess the correct path - d = os.path.abspath(project_dir) - while os.path.dirname(d) != d: - d = os.path.dirname(d) - for i in os.listdir(d): - if os.path.isfile(d + '/' + i + '/bin/opt'): - llvm_path = d + '/' + i - print('Auto-detecting llvm-path as ' + llvm_path) - break - if llvm_path is not None: break - else: - print('Error: No llvm-path specified (use --llvm-path)') - parser.print_help() - sys.exit(1) - -if llvm_src is None: - # Try to guess the correct path - d = os.path.abspath(project_dir) - while os.path.dirname(d) != d: - d = os.path.dirname(d) - for i in os.listdir(d): - if os.path.isfile(d + '/' + i + '/cmake_install.cmake'): - continue - if os.path.isdir(d + '/' + i + '/lib/Analysis'): - llvm_src = d + '/' + i - print('Auto-detecting llvm-src as ' + llvm_src) - break - if llvm_src is not None: break - else: - print('Error: No llvm-src specified (use --llvm-src)') - parser.print_help() - sys.exit(1) - -if clang_path is None: - clang_path = llvm_path - print('clang-path not specified, defaulting to ' + clang_path) - -opt = llvm_path + '/bin/opt' -llvm_dest = llvm_src + '/lib/Analysis' -clang = clang_path + '/bin/clang' - -if not os.path.isfile(opt): - print('Error: no opt exists at ' + opt + ' (maybe you forgot to build LLVM?)') - sys.exit(2) -if not os.path.isdir(llvm_dest): - print('Error: directory does not exist, at ' + llvm_dest) - sys.exit(2) -if not os.path.isfile(clang): - print('Error: no clang exists at ' + clang) - sys.exit(2) - -# Create the symbolic link in the LLVM sources -try: - link_name = llvm_dest + '/' + project_name - os.symlink(project_dir, link_name, target_is_directory=True) - print('Created symbolic link from %s to %s' % (link_name, project_dir)) -except FileExistsError: - print('Symlink already exists') - -# Write the configuration for the run.py script -config_file_name = project_dir + '/.config'; -config_file = open(config_file_name, 'w') -config_file.write(llvm_src+'\n'+llvm_path+'\n'+clang_path+'\n') 
-config_file.close() -print('Wrote configuration to %s' % (config_file_name,)) - -# Adjust CMakeLists.txt -cmake_file_name = llvm_dest + '/CMakeLists.txt' -line_to_insert = 'add_subdirectory(%s)\n' % (project_name,) -cmake_file = open(cmake_file_name, 'r') -lines = cmake_file.readlines() -needs_changes = line_to_insert not in lines -cmake_file.close() -if needs_changes: - cmake_file = open(cmake_file_name, 'a') - cmake_file.write('\n' + line_to_insert) - cmake_file.close() - print('CMakeLists.txt modified, at %s' % (cmake_file_name,)) - - # Also regenerate the CMake cache - print('Rebuilding cmake cache') - subprocess.run([cmake, llvm_path], check=True) -else: - print('CMakeLists.txt is fine') - diff --git a/lldb/ValueFormatter.py b/lldb/ValueFormatter.py new file mode 100644 index 0000000000000000000000000000000000000000..28900c5e99dff0b2938e6893aaad839e1b7e1fe9 --- /dev/null +++ b/lldb/ValueFormatter.py @@ -0,0 +1,16 @@ +import lldb + +def ValueFormatter(valobj, internal_dict): + frame = lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame() + options = lldb.SBExpressionOptions() + options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus) + options.SetTrapExceptions(False) + options.SetTimeoutInMicroSeconds(5000000) # 5s + + addr = valobj.GetValue() + expr = frame.EvaluateExpression('((llvm::Value*) {0})->getName().data()'.format(addr), options) + name = expr.GetSummary() + return '{0}'.format(name) + +def __lldb_init_module(debugger, internal_dict): + debugger.HandleCommand("type summary add -F " + __name__ + ".ValueFormatter llvm::Value") diff --git a/run.py b/run.py deleted file mode 100755 index f0e6370f95c393d31b8403a51e43a0106e70bc3f..0000000000000000000000000000000000000000 --- a/run.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python3 -# coding: utf-8 - -import argparse -import os -import platform -import shlex -import subprocess -import sys - -if sys.version_info[0] < 3: - print("Error: This script only supports Python 3") - sys.exit(5) - -project_dir = os.path.abspath(os.path.dirname(sys.argv[0])) -os.chdir(project_dir) - -if not os.path.isfile(".config"): - print("No config file was found. Please run init.py first!") - sys.exit(-1) - -config_file = open(".config", "r") -lines = config_file.readlines() -llvm_path = lines[1].strip() -clang_path = lines[2].strip() -config_file.close() - -opt = llvm_path + '/bin/opt' -llvm_dis = llvm_path + '/bin/llvm-dis' -llvm_config = llvm_path + '/bin/llvm-config' -clang = clang_path + '/bin/clang' -cmake = 'cmake' -gdb = 'gdb' -lldb = 'lldb' - -CXX = os.environ.get('CXX', 'c++') - -if not os.path.isfile(opt): - print('Error: no opt exists at ' + opt + ' (maybe you forgot to build LLVM?)') - sys.exit(2) -if not os.path.isfile(llvm_dis): - print('Error: no llvm-dis exists at ' + llvm_dis + ' (maybe you forgot to build LLVM?)') - sys.exit(2) -if not os.path.isfile(clang): - print('Error: no clang exists at ' + clang) - sys.exit(2) - -if platform.system() == 'Linux': - libeext = '.so' -elif platform.system() == 'Windows': - libeext = '.dll' - print('Error: Windows is not supported. 
(You can try to delete this error and proceed at your own risk.') - sys.exit(4) -elif platform.system() == 'Darwin': - libeext = '.dylib' -else: - print('Error: Unknown platform ' + platform.system()) - sys.exit(4) - -pass_lib = llvm_path + "/lib/llvm-pain" + libeext -pass_name = "painpass" -make_target = "llvm-pain" - -samples = project_dir + '/samples' - -def main(): - def run(arg, cwd=None, redirect=None): - if not args.only_print: - try: - if redirect: - f = open(redirect, 'w') - subprocess.run(arg, cwd=cwd, stdout=f, stderr=f, check=True) - f.close() - else: - subprocess.run(arg, cwd=cwd, check=True) - except subprocess.CalledProcessError as e: - print('Error: while executing ' + str(e.cmd)) - if redirect: - f.close() - print(open(redirect, 'r').read()) - sys.exit(3) - else: - cmd = ' '.join(shlex.quote(i) for i in arg) - if redirect: - cmd += ' > %s 2>&1' % (shlex.quote(redirect),) - if cwd is None: - print(' ' + cmd) - else: - print(' ( cd %s && %s )' % (shlex.quote(cwd), cmd)) - - parser = argparse.ArgumentParser() - parser.add_argument("file", help="run only the specfied files", nargs='*') - parser.add_argument("-n", dest='only_print', help="only print the commands, do not execute anything", action="store_true") - parser.add_argument("-v", dest='show_output', help="show output on stdout", action="store_true") - parser.add_argument("--cfg", dest='view_cfg', help="show llvm control flow graph", action="store_true") - parser.add_argument("--make", dest='do_make', help="call make before executing the script", action="store_true") - parser.add_argument("--only-make", dest='do_make_only', help="only call make, do not execute any samples", action="store_true") - parser.add_argument("--gdb", dest='do_gdb', help="open the gdb debugger for the specified file", action="store_true") # might not work with mac OS try lldb - parser.add_argument("--lldb", dest='do_lldb', help="open the lldb debugger for the specified file", action="store_true") - parser.add_argument("--run-test", dest='run_test', help="run the test for SimpleInterval", action="store_true") - parser.add_argument("--use-cxx", metavar='path', dest='use_cxx', help="use as c++ compiler when building the test") - args = parser.parse_args() - - # If no files are specified, set it to all .c files in current directory - files = args.file - if args.run_test: - if files: - print('Error: you are trying to both run the test and a file. This does not really make sense.') - sys.exit(4) - elif args.do_gdb: - if len(files) != 1: - print('Error: you are trying to run the debugger on multiple files. This does not really make sense, just specify a single one.') - sys.exit(4) - elif args.do_make_only: - args.do_make = True - files = [] - args.run_test = False - elif not files: - files = [i for i in os.listdir(samples) if i.endswith('.c')] - - if args.do_make: - print("Building %s..." 
% (make_target,)) - run([cmake, '--build', llvm_path, '--target', make_target]) - - if not os.path.isfile(pass_lib): - print('Error: Could not find shared library ' + pass_lib) - print('Please build the project (for example by running this script with the option --make') - sys.exit(7) - - os.makedirs('output', exist_ok=True) - - for fname in files: - f_orig = 'samples/%s' % (fname,) - f_bc = 'output/%s-tmp.bc' % (fname,) - f_optbc = 'output/%s.bc' % (fname,) - f_optll = 'output/%s.ll' % (fname,) - f_out = 'output/%s.out' % (fname,) - - if not os.path.isfile(f_orig): - print("Error: " + f_orig +" not found!") - continue - print("Processing file " + fname + " ...") - - run([clang, '-O0', '-emit-llvm', f_orig, '-Xclang', '-disable-O0-optnone', '-c', '-o', f_bc]) - run([opt, '-mem2reg', f_bc, '-o', f_optbc]) - run([llvm_dis, f_optbc, '-o', f_optll]) - run(['rm', f_bc, f_optbc]) - - redir = None if args.show_output else f_out - args_add = [] - if args.view_cfg: - args_add.append('--view-cfg') - - base_args = ['-load', pass_lib, '-'+pass_name, '-S'] + args_add + ['-o', '/dev/null', f_optll] - if not args.do_gdb: - run([opt] + base_args, redirect=redir) - else: - break_at = 'pcpo::AbstractInterpretationPass::runOnModule(llvm::Module&)' - if not args.only_print: - print('In a moment, gdb is going to read in the symbols of opt. As you might notice, that takes a long time. So, here is a tip: Just restart the program using r (no need to specify arguments). Even if you rebuild the project, that is in a shared library and will thus be reloaded the next time you start the program.') - run([gdb, '-q', opt, '-ex', 'r ' + ' '.join(map(shlex.quote, base_args))]) - - if not args.do_lldb: - run([opt] + base_args, redirect=redir) - else: - break_at = 'pcpo::AbstractInterpretationPass::runOnModule(llvm::Module&)' - run([lldb, opt, '--', ' '.join(map(shlex.quote, base_args))]) - - if args.run_test: - if not os.path.isfile(llvm_config): - print('Error: no llvm-config exists at ' + llvm_config + ' (maybe you forgot to build LLVM?)') - sys.exit(2) - - os.makedirs('build', exist_ok=True) - - cxx = CXX - if args.use_cxx is not None: - cxx = args.use_cxx - - print('Info: Building the test using %s' % (cxx,)) - - cxxflags = subprocess.run([llvm_config, '--cxxflags'], stdout=subprocess.PIPE).stdout.decode('ascii').split() - ldflags = subprocess.run([llvm_config, '--ldflags' ], stdout=subprocess.PIPE).stdout.decode('ascii').split() - libs = subprocess.run([llvm_config, '--libs', 'analysis'], stdout=subprocess.PIPE).stdout.decode('ascii').split() - if platform.system() == 'Darwin': - libs += '-lz -ldl -lpthread -lm -lcurses'.split() - else: - libs += '-lz -lrt -ldl -ltinfo -lpthread -lm'.split() - - run([cxx, 'test/simple_interval_test.cpp', 'src/simple_interval.cpp', '-Isrc', '-fmax-errors=2'] + cxxflags - + ['-o', 'build/SimpleIntervalTest'] + ldflags + libs) - - try: - run(['build/SimpleIntervalTest']) - except KeyboardInterrupt: - pass - -if __name__ == "__main__": - main() diff --git a/samples/A.c b/samples/A.c new file mode 100644 index 0000000000000000000000000000000000000000..dff0c3a3e0a789c9f4dfe45b3f64103c46c3e47a --- /dev/null +++ b/samples/A.c @@ -0,0 +1,144 @@ +/* + * This is a RANDOMLY GENERATED PROGRAM. 
+ * + * Generator: csmith 2.3.0 + * Git version: 30dccd7 + * Options: --output A.c --no-argc --no-arrays --no-bitfields --no-checksum --no-comma-operators --no-compound-assignment --no-divs --no-float --no-structs --no-unions --no-packed-struct --no-pointers --no-builtins --no-math64 --no-longlong --no-checksum --no-safe-math --max-block-size 2 --max-funcs 2 + * Seed: 7117265377608335715 + */ + + +#define NO_LONGLONG + +#define int8_t signed char +#define uint8_t unsigned char + +#define int16_t short +#define uint16_t unsigned short + +#define int32_t int +#define uint32_t unsigned + +#define int64_t long long +#define uint64_t unsigned long long + +static inline void +platform_main_end (uint64_t x, int flag) +{ +#ifndef NOT_PRINT_CHECKSUM + if (!flag) { +#ifdef NO_PRINTF + int i; + my_puts ("checksum = "); + for (i=0; i<16; i++) { + put_hex (x & 0xf); + x >>= 4; + } + putchar ('\n'); +#else + printf ("checksum = %llx\n", x); +#endif + } +#endif +} +volatile uint32_t csmith_sink_ = 0; + +static long __undefined; + +/* --- Struct/Union Declarations --- */ +/* --- GLOBAL VARIABLES --- */ +static int32_t g_2 = (-1); +static int32_t g_3 = 0x245F1DBE; +static int16_t g_62 = 0xE6DE; +static int32_t g_65 = 0; +static int8_t g_66 = (-5); + + +/* --- FORWARD DECLARATIONS --- */ +static uint16_t func_1(void); +static int8_t func_16(const int16_t p_17, uint8_t p_18, int8_t p_19, int32_t p_20, uint32_t p_21); + + +/* --- FUNCTIONS --- */ +/* ------------------------------------------ */ +/* + * reads : g_2 g_3 g_66 + * writes: g_3 g_62 g_65 g_66 g_2 + */ +static uint16_t func_1(void) +{ /* block id: 0 */ + const uint16_t l_22 = 65526U; + int32_t l_32 = (-1); + g_2 = ((g_3 = g_2) && ((uint8_t)(((((g_66 = ((((uint8_t)g_2 % (uint8_t)((uint16_t)((uint16_t)(0x8951 >= (-1)) >> (uint16_t)((int8_t)(g_65 = ((int8_t)func_16(l_22, ((uint16_t)(l_32 = (l_22 && (g_2 || ((0U ^ ((uint16_t)(((((uint8_t)((uint8_t)(+g_3) << (uint8_t)3) >> (uint8_t)0) <= 0U) <= g_3) <= 0xAF9CA3EA) + (uint16_t)g_2)) == l_22)))) << (uint16_t)g_3), l_22, g_2, g_2) >> (int8_t)g_2)) >> (int8_t)l_22)) >> (uint16_t)2)) <= g_2) & 0U)) | l_22) || l_32) > g_2) ^ 1) + (uint8_t)l_22)); + return g_66; +} + + +/* ------------------------------------------ */ +/* + * reads : g_2 g_3 + * writes: g_62 g_3 + */ +static int8_t func_16(const int16_t p_17, uint8_t p_18, int8_t p_19, int32_t p_20, uint32_t p_21) +{ /* block id: 3 */ + uint32_t l_35 = 4294967293U; + uint16_t l_41 = 1U; + int32_t l_61 = 0xBF2A6BC6; + int32_t l_63 = 4; + int32_t l_64 = 0x4FFA2B87; + g_3 = (((uint8_t)(((l_35 && ((int16_t)(!l_35) + (int16_t)((uint16_t)(((0x6D23205E <= (l_64 = ((l_63 = ((l_41 = p_19) || ((int32_t)((int16_t)(g_62 = ((int8_t)(((((uint8_t)((int16_t)((p_19 = p_20) ^ 0U) - (int16_t)(((int16_t)0xCCFE >> (int16_t)8) < g_2)) << (uint8_t)((uint16_t)(((l_61 = ((int8_t)((int8_t)(!p_18) - (int8_t)g_3) >> (int8_t)5)) && l_35) & p_20) << (uint16_t)1)) != 0xD9) <= l_35) > 1) % (int8_t)g_2)) * (int16_t)p_18) + (int32_t)p_20))) & l_35))) == p_17) || p_20) << (uint16_t)g_2))) == g_3) && 0x412C2192) - (uint8_t)9) > p_21); + return p_21; +} + + + + +/* ---------------------------------------- */ +int main (void) +{ + int print_hash_value = 0; + platform_main_begin(); + func_1(); + csmith_sink_ = g_2; + csmith_sink_ = g_3; + csmith_sink_ = g_62; + csmith_sink_ = g_65; + csmith_sink_ = g_66; + platform_main_end(0,0); + return 0; +} + +/************************ statistics ************************* +XXX max struct depth: 0 +breakdown: + depth: 0, occurrence: 12 +XXX total union 
variables: 0 + +XXX max expression depth: 35 +breakdown: + depth: 1, occurrence: 4 + depth: 35, occurrence: 2 + +XXX total number of pointers: 0 + +XXX times a non-volatile is read: 41 +XXX times a non-volatile is write: 12 +XXX times a volatile is read: 0 +XXX times read thru a pointer: 0 +XXX times a volatile is write: 0 +XXX times written thru a pointer: 0 +XXX times a volatile is available for access: 0 +XXX percentage of non-volatile access: 100 + +XXX forward jumps: 0 +XXX backward jumps: 0 + +XXX stmts: 4 +XXX max block depth: 0 +breakdown: + depth: 0, occurrence: 4 + +XXX percentage a fresh-made variable is used: 22.6 +XXX percentage an existing variable is used: 77.4 +********************* end of statistics **********************/ + diff --git a/samples/B.c b/samples/B.c new file mode 100644 index 0000000000000000000000000000000000000000..1d0d9bf53f5d63a902a378bf13dbc0ef8bd4724f --- /dev/null +++ b/samples/B.c @@ -0,0 +1,313 @@ +/* + * This is a RANDOMLY GENERATED PROGRAM. + * + * Generator: csmith 2.3.0 + * Git version: 30dccd7 + * Options: --output B.c --no-argc --no-arrays --no-bitfields --no-checksum --no-comma-operators --no-compound-assignment --no-divs --no-float --no-structs --no-unions --no-packed-struct --no-pointers --no-builtins --no-math64 --no-longlong --no-checksum --no-safe-math --max-block-size 2 --max-funcs 4 + * Seed: 2537522965051140364 + */ + + +#define NO_LONGLONG + +#define int8_t signed char +#define uint8_t unsigned char + +#define int16_t short +#define uint16_t unsigned short + +#define int32_t int +#define uint32_t unsigned + +#define int64_t long long +#define uint64_t unsigned long long + +static inline void +platform_main_end (uint64_t x, int flag) +{ +#ifndef NOT_PRINT_CHECKSUM + if (!flag) { +#ifdef NO_PRINTF + int i; + my_puts ("checksum = "); + for (i=0; i<16; i++) { + put_hex (x & 0xf); + x >>= 4; + } + putchar ('\n'); +#else + printf ("checksum = %llx\n", x); +#endif + } +#endif +} +volatile uint32_t csmith_sink_ = 0; + +static long __undefined; + +/* --- Struct/Union Declarations --- */ +/* --- GLOBAL VARIABLES --- */ +static uint32_t g_6 = 0x964A53AA; +static uint32_t g_74 = 4294967292U; +static int16_t g_75 = 3; +static uint16_t g_79 = 1U; +static uint32_t g_95 = 0U; +static int32_t g_106 = 0x4023A1BF; +static int32_t g_116 = 0x105BA918; +static int32_t g_139 = 0xB1B1B7DE; +static uint16_t g_168 = 0x385D; +static int16_t g_204 = 5; +static uint16_t g_209 = 0x5A12; +static int32_t g_233 = (-1); +static uint16_t g_268 = 0U; +static volatile int32_t g_296 = 2;/* VOLATILE GLOBAL g_296 */ +static uint32_t g_298 = 6U; + + +/* --- FORWARD DECLARATIONS --- */ +static int32_t func_1(void); +static uint8_t func_8(uint32_t p_9, uint32_t p_10, int32_t p_11, int16_t p_12, int8_t p_13); +static int32_t func_15(uint32_t p_16, uint32_t p_17, int8_t p_18, uint16_t p_19); +static int8_t func_20(uint16_t p_21, uint32_t p_22, uint32_t p_23); + + +/* --- FUNCTIONS --- */ +/* ------------------------------------------ */ +/* + * reads : g_6 g_74 g_75 g_79 g_95 g_116 g_106 g_139 g_168 g_204 g_209 g_233 g_268 g_296 + * writes: g_75 g_79 g_95 g_106 g_74 g_116 g_139 g_168 g_204 g_209 g_233 g_268 g_298 + */ +static int32_t func_1(void) +{ /* block id: 0 */ + uint32_t l_7 = 0x3099CFE0; + int32_t l_14 = 0x7BEDEDB5; + int32_t l_36 = (-5); + int32_t l_37 = 0xD76270E7; + int32_t l_38 = (-1); + uint32_t l_39 = 0x26A9D9E1; + uint8_t l_203 = 0x4E; + uint32_t l_234 = 0U; + g_139 = ((int8_t)((int8_t)(l_7 = g_6) - (int8_t)func_8(g_6, l_14, (g_79 = func_15(g_6, g_6, 
func_20(((((int16_t)((int8_t)l_14 >> (int8_t)1) * (int16_t)((int8_t)((int8_t)((l_38 = ((l_37 = ((uint32_t)((uint16_t)((l_36 = g_6) > g_6) + (uint16_t)1) - (uint32_t)g_6)) > 0x53A38D06)) >= l_14) << (int8_t)0) % (int8_t)g_6)) | g_6) == l_39), l_39, g_6), g_6)), l_14, l_14)) - (int8_t)1U); + if (((uint32_t)g_6 - (uint32_t)l_36)) + { /* block id: 59 */ + int8_t l_161 = 0xCE; + g_116 = ((int16_t)(g_106 || 0xB671) >> (int16_t)9); + if ((l_37 = ((uint16_t)g_6 + (uint16_t)(9U != ((uint8_t)(g_168 = ((int8_t)(-(uint32_t)((int8_t)(((uint16_t)(((int8_t)((uint8_t)0x57 * (uint8_t)(((g_139 && (((g_106 == ((int8_t)((l_37 < l_161) < ((uint32_t)(((int32_t)(g_95 >= ((int32_t)(g_116 = (g_74 & l_161)) - (int32_t)4294967290U)) - (int32_t)1) || 9U) + (uint32_t)l_161)) >> (int8_t)4)) != l_161) <= l_38)) || 0x3E) <= 0x93B69C79)) % (int8_t)g_106) == 0U) + (uint16_t)3) || g_75) << (int8_t)6)) >> (int8_t)5)) + (uint8_t)g_75))))) + { /* block id: 64 */ + int32_t l_171 = 1; + g_106 = ((uint8_t)l_171 << (uint8_t)4); + } + else + { /* block id: 66 */ + return g_168; + } + } + else + { /* block id: 69 */ + uint8_t l_201 = 250U; + int32_t l_205 = 0x5094A51A; + int32_t l_215 = 6; + int32_t l_287 = 0xAE1B71C9; + int32_t l_297 = 0; + if (g_106) + { /* block id: 70 */ + int16_t l_184 = 0x6D60; + int16_t l_202 = (-9); + int32_t l_225 = 0x87346C39; + int32_t l_235 = 0x077A8CD5; + if (((((g_6 != ((int16_t)((uint32_t)g_139 - (uint32_t)(l_205 = (g_204 = (g_74 = ((int32_t)((int32_t)(((l_14 = ((uint8_t)(l_38 = ((int16_t)l_184 + (int16_t)g_6)) % (uint8_t)((int16_t)(l_202 = ((int16_t)((((int8_t)((uint8_t)(((int16_t)(l_37 = (((int32_t)((((uint16_t)0x42E7 << (uint16_t)(l_184 && 0xF7E8)) > (((uint8_t)l_184 >> (uint8_t)3) && g_116)) <= l_201) + (int32_t)g_79) <= g_95)) >> (int16_t)l_184) < 0x52EA) - (uint8_t)g_116) * (int8_t)l_201) & 0xBA) | 0) * (int16_t)0x072C)) - (int16_t)1U))) != l_203) == g_79) % (int32_t)g_74) + (int32_t)g_95))))) * (int16_t)l_39)) != g_139) == g_75) != g_116)) + { /* block id: 78 */ + g_116 = (-3); + } + else + { /* block id: 80 */ + for (g_95 = 0; (g_95 <= 11); g_95 += 1) + { /* block id: 83 */ + const int32_t l_208 = 0x5ADCE77D; + g_209 = (l_208 <= g_168); + } + } + l_38 = (l_235 = (!((uint8_t)((int8_t)(((l_215 <= ((int16_t)(249U ^ (((uint16_t)(((int16_t)(g_74 || (~((int8_t)(g_233 = ((((l_225 = l_37) || g_204) && g_106) ^ ((int8_t)(((int8_t)g_75 % (int8_t)((int16_t)g_139 >> (int16_t)12)) ^ (-(uint16_t)(g_79 && g_6))) >> (int8_t)g_6))) << (int8_t)3))) + (int16_t)1U) <= 0xE64536AC) * (uint16_t)g_204) == g_116)) * (int16_t)g_75)) <= 0xE7461C8C) != l_184) << (int8_t)3) >> (uint8_t)l_234))); + } + else + { /* block id: 91 */ + return l_205; + } + if (((uint32_t)g_79 - (uint32_t)g_74)) + { /* block id: 94 */ + uint32_t l_255 = 0x295DAE69; + int32_t l_269 = 0xEBDA9155; + l_205 = ((int32_t)(((int16_t)(((int8_t)((~(((int16_t)((l_205 && g_116) == ((int8_t)((uint16_t)((g_106 = (((uint8_t)((int16_t)(g_204 = l_255) * (int16_t)((l_269 = ((int32_t)((l_201 | (((0x62 | (g_209 | ((((uint16_t)((int8_t)((uint8_t)(g_268 = (((((uint8_t)((int8_t)(l_215 > l_255) % (int8_t)g_233) - (uint8_t)0xBC) < 1U) || (-1)) >= g_116)) % (uint8_t)g_95) % (int8_t)g_75) << (uint16_t)l_255) && g_79) <= (-1)))) || 0xD10CE58A) >= l_39)) & l_255) % (int32_t)l_255)) && g_139)) + (uint8_t)0x85) && 4294967295U)) | l_201) + (uint16_t)0xD827) * (int8_t)g_6)) + (int16_t)l_201) < g_233)) && 0x87BBF4A9) << (int8_t)1) == g_168) * (int16_t)g_95) && g_268) + (int32_t)l_234); + g_116 = g_139; + } + else + { /* block id: 101 */ + uint32_t l_277 = 4294967295U; + g_116 = 
((uint8_t)(g_298 = ((l_297 = ((uint8_t)(~((uint16_t)(((g_106 = (l_277 = 0x7E71CC03)) >= (+(((((uint8_t)(l_203 ^ 4U) * (uint8_t)((int8_t)((uint8_t)((uint8_t)(l_205 = l_234) * (uint8_t)0U) + (uint8_t)((l_287 = (l_14 = g_139)) >= (0 <= ((uint16_t)7U >> (uint16_t)11)))) << (int8_t)1)) == ((uint32_t)((int8_t)((uint32_t)((0xECE6 > l_201) & g_168) - (uint32_t)g_6) % (int8_t)l_201) + (uint32_t)g_296)) || g_204) == 0x3FA2BBC1))) != 0x824737F3) << (uint16_t)g_168)) >> (uint8_t)l_37)) < g_204)) - (uint8_t)l_215); + } + } + return l_36; +} + + +/* ------------------------------------------ */ +/* + * reads : g_79 g_6 g_74 g_75 g_95 g_116 + * writes: g_95 g_106 g_74 g_75 g_116 + */ +static uint8_t func_8(uint32_t p_9, uint32_t p_10, int32_t p_11, int16_t p_12, int8_t p_13) +{ /* block id: 25 */ + uint32_t l_98 = 1U; + int32_t l_107 = 0x1B5D7EC3; + int32_t l_108 = 1; + uint32_t l_130 = 0U; + int8_t l_132 = 0x9F; + l_108 = ((uint8_t)p_13 % (uint8_t)(((uint8_t)((int8_t)((uint16_t)((((l_107 = ((uint16_t)p_10 << (uint16_t)((((-(int8_t)(((int8_t)(p_10 ^ ((g_95 = ((int8_t)0x6C >> (int8_t)2)) ^ ((int16_t)(((g_79 <= (-1)) >= l_98) & ((uint8_t)p_12 * (uint8_t)((g_106 = ((((((uint16_t)(((uint16_t)(~l_98) + (uint16_t)p_13) < p_11) * (uint16_t)p_10) > g_6) & 0x87233ED7) ^ g_6) >= 0xE5)) > p_11))) << (int16_t)p_10))) << (int8_t)l_98) & 2)) | p_9) || p_10) | 0x83BD))) < l_98) ^ l_98) == p_13) + (uint16_t)l_98) << (int8_t)3) << (uint8_t)4) && 1)); + if (p_9) + { /* block id: 30 */ + for (p_11 = 24; (p_11 == 14); --p_11) + { /* block id: 33 */ + if (p_9) + break; + g_106 = p_10; + } + } + else + { /* block id: 37 */ + for (g_74 = 0; (g_74 > 46); g_74++) + { /* block id: 40 */ + int16_t l_129 = 0xED4C; + for (g_75 = 0; (g_75 < 12); g_75 += 1) + { /* block id: 43 */ + const uint8_t l_131 = 0x0C; + g_106 = (0x7436 && ((((+(g_75 != ((((0x20E1BCB0 & (g_116 = g_95)) | (4 > (((int16_t)((int16_t)(((uint16_t)((uint32_t)((uint32_t)(((uint16_t)6U - (uint16_t)((((((-1) <= l_129) >= (((((l_130 = l_108) ^ 1) < 1) ^ g_95) <= l_131)) ^ p_10) && 3U) >= l_108)) == p_11) + (uint32_t)p_11) % (uint32_t)p_11) * (uint16_t)p_12) >= 4U) >> (int16_t)g_74) >> (int16_t)15) || l_129))) != 0xE99C) == l_131))) > l_132) != g_79) == l_132)); + g_116 = (((uint8_t)((int8_t)(g_116 == p_11) + (int8_t)p_10) - (uint8_t)l_131) ^ l_129); + } + } + for (p_13 = (-19); (p_13 != (-2)); p_13++) + { /* block id: 52 */ + l_108 = g_95; + return l_107; + } + } + return l_107; +} + + +/* ------------------------------------------ */ +/* + * reads : g_6 g_74 g_75 + * writes: g_75 + */ +static int32_t func_15(uint32_t p_16, uint32_t p_17, int8_t p_18, uint16_t p_19) +{ /* block id: 10 */ + uint32_t l_50 = 0x2097756E; + int32_t l_76 = (-1); + if ((l_50 = (p_18 < g_6))) + { /* block id: 12 */ + int8_t l_51 = (-4); + int32_t l_56 = (-3); + int32_t l_77 = 0xD7B9758A; + l_51 = g_6; + l_77 = (l_76 = ((uint16_t)0x0270 * (uint16_t)((int32_t)(g_6 > ((((l_56 = g_6) ^ ((uint8_t)g_6 >> (uint8_t)(p_18 = (g_75 = (0xBB <= (((uint8_t)0xB0 * (uint8_t)(((int8_t)((int8_t)(((int8_t)(~(((((((int16_t)((uint8_t)((uint16_t)(p_19 = (p_17 > l_51)) << (uint16_t)13) - (uint8_t)p_18) << (int16_t)l_50) < g_6) ^ 0x88841F9F) && g_74) > p_16) && 0x6485393F)) + (int8_t)g_74) >= 4U) * (int8_t)p_16) >> (int8_t)1) & p_17)) == p_16)))))) & l_50) && g_75)) - (int32_t)g_74))); + } + else + { /* block id: 20 */ + uint32_t l_78 = 4294967293U; + l_78 = 0xA659E808; + } + return p_18; +} + + +/* ------------------------------------------ */ +/* + * reads : g_6 + * writes: + */ +static int8_t func_20(uint16_t p_21, 
uint32_t p_22, uint32_t p_23) +{ /* block id: 5 */ + uint16_t l_46 = 0x91CB; + int32_t l_47 = 0x1CAF1E2C; + int32_t l_48 = 0xE7BAA9D7; + int32_t l_49 = 0x3FD90538; + l_49 = (((int16_t)(p_22 != (((int8_t)((l_46 ^ ((((((l_47 = 1U) != ((g_6 >= l_46) >= (0U & (g_6 | p_23)))) < ((l_48 = (0x24B13B10 <= p_23)) > p_23)) == p_23) ^ l_46) || g_6)) & p_23) % (int8_t)g_6) | 0)) >> (int16_t)l_46) ^ l_46); + return g_6; +} + + + + +/* ---------------------------------------- */ +int main (void) +{ + int print_hash_value = 0; + platform_main_begin(); + func_1(); + csmith_sink_ = g_6; + csmith_sink_ = g_74; + csmith_sink_ = g_75; + csmith_sink_ = g_79; + csmith_sink_ = g_95; + csmith_sink_ = g_106; + csmith_sink_ = g_116; + csmith_sink_ = g_139; + csmith_sink_ = g_168; + csmith_sink_ = g_204; + csmith_sink_ = g_209; + csmith_sink_ = g_233; + csmith_sink_ = g_268; + csmith_sink_ = g_296; + csmith_sink_ = g_298; + platform_main_end(0,0); + return 0; +} + +/************************ statistics ************************* +XXX max struct depth: 0 +breakdown: + depth: 0, occurrence: 55 +XXX total union variables: 0 + +XXX max expression depth: 40 +breakdown: + depth: 1, occurrence: 34 + depth: 2, occurrence: 9 + depth: 3, occurrence: 2 + depth: 5, occurrence: 1 + depth: 21, occurrence: 1 + depth: 25, occurrence: 1 + depth: 29, occurrence: 1 + depth: 30, occurrence: 2 + depth: 31, occurrence: 2 + depth: 34, occurrence: 1 + depth: 36, occurrence: 1 + depth: 40, occurrence: 1 + +XXX total number of pointers: 0 + +XXX times a non-volatile is read: 210 +XXX times a non-volatile is write: 65 +XXX times a volatile is read: 1 +XXX times read thru a pointer: 0 +XXX times a volatile is write: 0 +XXX times written thru a pointer: 0 +XXX times a volatile is available for access: 2 +XXX percentage of non-volatile access: 99.6 + +XXX forward jumps: 0 +XXX backward jumps: 0 + +XXX stmts: 38 +XXX max block depth: 4 +breakdown: + depth: 0, occurrence: 10 + depth: 1, occurrence: 10 + depth: 2, occurrence: 13 + depth: 3, occurrence: 4 + depth: 4, occurrence: 1 + +XXX percentage a fresh-made variable is used: 20.5 +XXX percentage an existing variable is used: 79.5 +********************* end of statistics **********************/ + diff --git a/samples/C.c b/samples/C.c new file mode 100644 index 0000000000000000000000000000000000000000..d042aaa05e05834750aba2da51a7877f6c65088a --- /dev/null +++ b/samples/C.c @@ -0,0 +1,143 @@ +/* + * This is a RANDOMLY GENERATED PROGRAM. 
+ * + * Generator: csmith 2.3.0 + * Git version: 30dccd7 + * Options: --output C.c --no-argc --no-arrays --no-bitfields --no-checksum --no-comma-operators --no-compound-assignment --no-divs --no-float --no-structs --no-unions --no-packed-struct --no-pointers --no-builtins --no-math64 --no-longlong --no-checksum --no-safe-math --max-block-size 4 --max-funcs 2 + * Seed: 4773875934381650930 + */ + + +#define NO_LONGLONG + +#define int8_t signed char +#define uint8_t unsigned char + +#define int16_t short +#define uint16_t unsigned short + +#define int32_t int +#define uint32_t unsigned + +#define int64_t long long +#define uint64_t unsigned long long + +static inline void +platform_main_end (uint64_t x, int flag) +{ +#ifndef NOT_PRINT_CHECKSUM + if (!flag) { +#ifdef NO_PRINTF + int i; + my_puts ("checksum = "); + for (i=0; i<16; i++) { + put_hex (x & 0xf); + x >>= 4; + } + putchar ('\n'); +#else + printf ("checksum = %llx\n", x); +#endif + } +#endif +} +volatile uint32_t csmith_sink_ = 0; + +static long __undefined; + +/* --- Struct/Union Declarations --- */ +/* --- GLOBAL VARIABLES --- */ +static uint8_t g_2 = 0xFE; +static int16_t g_6 = 0xAB60; +static uint8_t g_40 = 1U; +static int32_t g_43 = 0x933DF47B; +static int32_t g_44 = 0x6C987F9B; + + +/* --- FORWARD DECLARATIONS --- */ +static const uint8_t func_1(void); +static const uint16_t func_12(const uint32_t p_13, int32_t p_14, uint32_t p_15, int32_t p_16, int16_t p_17); + + +/* --- FUNCTIONS --- */ +/* ------------------------------------------ */ +/* + * reads : g_2 + * writes: g_6 g_40 g_43 g_44 + */ +static const uint8_t func_1(void) +{ /* block id: 0 */ + uint16_t l_3 = 0x13DC; + int32_t l_11 = 1; + g_44 = (g_43 = ((l_3 = g_2) == ((l_11 = (65535U >= (((int16_t)((g_6 = g_2) <= ((uint32_t)((l_11 ^ l_11) || (((l_11 | func_12(l_11, ((((((uint16_t)((uint16_t)(((int16_t)g_2 - (int16_t)((uint8_t)l_11 * (uint8_t)3U)) ^ g_2) - (uint16_t)l_11) + (uint16_t)l_11) >= 4294967294U) & g_2) > (-3)) > 0x58B39D20), l_11, l_11, g_2)) ^ l_11) != g_2)) - (uint32_t)l_11)) >> (int16_t)g_2) && l_11))) >= 7U))); + return l_11; +} + + +/* ------------------------------------------ */ +/* + * reads : g_2 + * writes: g_40 + */ +static const uint16_t func_12(const uint32_t p_13, int32_t p_14, uint32_t p_15, int32_t p_16, int16_t p_17) +{ /* block id: 3 */ + int32_t l_32 = 0x7E784B52; + int32_t l_39 = (-9); + int32_t l_41 = 0x46221EB1; + int32_t l_42 = 0; + l_42 = ((g_2 || g_2) | ((((uint8_t)((int8_t)g_2 >> (int8_t)1) - (uint8_t)(l_41 = (p_15 ^ ((uint32_t)(g_2 <= (l_32 <= (((uint8_t)((uint16_t)((0x02 & ((g_40 = ((int16_t)(((((p_17 = 0xE8EA) > (((((l_39 = (g_2 || g_2)) || 0x96DAD8BE) ^ 0xE9F8) <= l_32) && l_39)) < g_2) <= p_14) == g_2) - (int16_t)1)) & p_13)) >= l_32) + (uint16_t)g_2) << (uint8_t)l_32) != l_32))) - (uint32_t)l_32)))) > g_2) == p_15)); + return g_2; +} + + + + +/* ---------------------------------------- */ +int main (void) +{ + int print_hash_value = 0; + platform_main_begin(); + func_1(); + csmith_sink_ = g_2; + csmith_sink_ = g_6; + csmith_sink_ = g_40; + csmith_sink_ = g_43; + csmith_sink_ = g_44; + platform_main_end(0,0); + return 0; +} + +/************************ statistics ************************* +XXX max struct depth: 0 +breakdown: + depth: 0, occurrence: 11 +XXX total union variables: 0 + +XXX max expression depth: 31 +breakdown: + depth: 1, occurrence: 4 + depth: 31, occurrence: 2 + +XXX total number of pointers: 0 + +XXX times a non-volatile is read: 43 +XXX times a non-volatile is write: 10 +XXX times a volatile is read: 0 +XXX times 
read thru a pointer: 0 +XXX times a volatile is write: 0 +XXX times written thru a pointer: 0 +XXX times a volatile is available for access: 0 +XXX percentage of non-volatile access: 100 + +XXX forward jumps: 0 +XXX backward jumps: 0 + +XXX stmts: 4 +XXX max block depth: 0 +breakdown: + depth: 0, occurrence: 4 + +XXX percentage a fresh-made variable is used: 20.8 +XXX percentage an existing variable is used: 79.2 +********************* end of statistics **********************/ + diff --git a/samples/D.c b/samples/D.c new file mode 100644 index 0000000000000000000000000000000000000000..46e8c8740748308418e7ed6dfb4c3eeb659e9c5e --- /dev/null +++ b/samples/D.c @@ -0,0 +1,285 @@ +/* + * This is a RANDOMLY GENERATED PROGRAM. + * + * Generator: csmith 2.3.0 + * Git version: 30dccd7 + * Options: --output D.c --no-argc --no-arrays --no-bitfields --no-checksum --no-comma-operators --no-compound-assignment --no-divs --no-float --no-structs --no-unions --no-packed-struct --no-pointers --no-builtins --no-math64 --no-longlong --no-checksum --no-safe-math --max-block-size 4 --max-funcs 4 + * Seed: 9511787161989909427 + */ + + +#define NO_LONGLONG + +#define int8_t signed char +#define uint8_t unsigned char + +#define int16_t short +#define uint16_t unsigned short + +#define int32_t int +#define uint32_t unsigned + +#define int64_t long long +#define uint64_t unsigned long long + +static inline void +platform_main_end (uint64_t x, int flag) +{ +#ifndef NOT_PRINT_CHECKSUM + if (!flag) { +#ifdef NO_PRINTF + int i; + my_puts ("checksum = "); + for (i=0; i<16; i++) { + put_hex (x & 0xf); + x >>= 4; + } + putchar ('\n'); +#else + printf ("checksum = %llx\n", x); +#endif + } +#endif +} +volatile uint32_t csmith_sink_ = 0; + +static long __undefined; + +/* --- Struct/Union Declarations --- */ +/* --- GLOBAL VARIABLES --- */ +static volatile int8_t g_2 = 0xF6;/* VOLATILE GLOBAL g_2 */ +static volatile uint32_t g_3 = 4294967292U;/* VOLATILE GLOBAL g_3 */ +static int32_t g_5 = 0x5BF25CBD; +static uint16_t g_26 = 0xF215; +static uint32_t g_56 = 9U; +static uint8_t g_57 = 0x2D; +static int32_t g_108 = 3; +static const volatile uint8_t g_112 = 251U;/* VOLATILE GLOBAL g_112 */ +static uint8_t g_124 = 0x67; +static int8_t g_196 = 0xBB; +static int32_t g_208 = 0x85F6FCF5; +static uint8_t g_210 = 0U; +static uint32_t g_216 = 1U; + + +/* --- FORWARD DECLARATIONS --- */ +static uint16_t func_1(void); +static uint16_t func_10(uint32_t p_11); +static uint32_t func_12(int16_t p_13, int32_t p_14, uint16_t p_15, int32_t p_16, uint32_t p_17); +static const uint8_t func_24(int32_t p_25); + + +/* --- FUNCTIONS --- */ +/* ------------------------------------------ */ +/* + * reads : g_2 g_3 g_5 g_26 g_56 g_57 g_112 g_108 g_124 g_208 g_210 + * writes: g_3 g_5 g_26 g_56 g_57 g_108 g_124 g_196 g_210 g_216 + */ +static uint16_t func_1(void) +{ /* block id: 0 */ + int32_t l_4 = 0x79536444; + g_3 = g_2; + g_5 = l_4; + l_4 = (((int16_t)((uint16_t)func_10(func_12(((int32_t)g_3 % (int32_t)((g_5 || ((l_4 && g_5) >= g_5)) ^ g_5)), (g_108 = (((uint8_t)l_4 >> (uint8_t)(((uint8_t)func_24((((g_26 = (0x4F || (0 != g_5))) | g_5) && 0xB002891B)) >> (uint8_t)1) ^ l_4)) < g_5)), l_4, l_4, g_5)) - (uint16_t)l_4) + (int16_t)l_4) >= 0x75CCD888); + return l_4; +} + + +/* ------------------------------------------ */ +/* + * reads : g_3 g_57 g_124 g_208 g_5 g_56 g_112 g_26 g_210 g_108 + * writes: g_196 g_210 g_5 g_26 g_216 + */ +static uint16_t func_10(uint32_t p_11) +{ /* block id: 71 */ + uint16_t l_191 = 0xE18E; + int8_t l_209 = 0x3C; + int32_t l_211 = 
0xD98C0EBE; + int32_t l_212 = 0x9A0713E2; + int16_t l_233 = 0xB477; + l_212 = ((uint16_t)g_3 >> (uint16_t)(g_26 = ((g_5 = (l_211 = ((uint16_t)((l_191 & g_57) | ((((((g_57 != (((uint16_t)l_191 - (uint16_t)(g_196 = 0x6ADF)) < (((int32_t)((int8_t)(g_210 = (((int16_t)(((g_124 == ((uint16_t)((((((p_11 && (((int8_t)(((((+0x99) & (-6)) && p_11) == g_208) <= 1U) >> (int8_t)g_5) <= p_11)) & g_57) || l_209) >= p_11) & g_124) < l_209) >> (uint16_t)p_11)) && 0x764A) >= 4294967289U) << (int16_t)p_11) == g_56)) + (int8_t)g_5) - (int32_t)0xAB7E1749) <= p_11))) && g_208) ^ l_209) ^ l_191) != l_191) > p_11)) + (uint16_t)g_5))) || l_211))); + l_211 = ((int8_t)((+(((9 > (1U >= (((g_208 && (((g_216 = 4294967295U) || (-2)) || (g_57 && g_112))) != ((uint8_t)((int32_t)((int16_t)(l_212 = ((((((0 == g_56) < l_212) && l_191) != g_26) >= 0xD89F8139) != g_210)) >> (int16_t)l_211) - (int32_t)1) >> (uint8_t)l_209)) >= l_209))) ^ g_208) >= 0)) ^ l_211) >> (int8_t)1); + l_211 = (p_11 ^ 0x73); + l_212 = (((((int16_t)g_108 + (int16_t)(l_211 < ((uint8_t)(((uint16_t)(((int8_t)l_191 % (int8_t)g_124) & (((int16_t)l_233 - (int16_t)p_11) >= g_26)) << (uint16_t)0) == ((uint16_t)(~1U) - (uint16_t)l_211)) + (uint8_t)l_209))) >= g_56) & 1) <= 0xE4F4); + return p_11; +} + + +/* ------------------------------------------ */ +/* + * reads : g_112 g_56 g_26 g_108 g_124 g_5 g_57 g_2 + * writes: g_124 g_5 + */ +static uint32_t func_12(int16_t p_13, int32_t p_14, uint16_t p_15, int32_t p_16, uint32_t p_17) +{ /* block id: 50 */ + int8_t l_109 = 0xC3; + int32_t l_117 = 1; + int32_t l_125 = 0xCEED78FB; + int32_t l_138 = 0x81864D21; + int32_t l_139 = 0x0732D581; + int32_t l_140 = 0; + const uint32_t l_147 = 4294967295U; + int32_t l_185 = 1; + uint32_t l_186 = 0x2304E23E; +lbl_154: + g_5 = ((l_109 = 65529U) ^ (((uint8_t)(g_112 > ((g_56 | ((int8_t)(l_117 = ((uint8_t)((l_117 & l_117) < ((uint32_t)(0x12 | l_117) % (uint32_t)((int8_t)p_15 << (int8_t)4))) * (uint8_t)((uint32_t)(g_124 = (p_15 | 0x16A80D17)) % (uint32_t)g_26))) << (int8_t)0)) >= l_125)) % (uint8_t)(-1)) | p_16)); + if ((l_125 = ((uint16_t)((uint32_t)((l_125 & ((l_140 = (p_13 || (g_112 >= ((uint8_t)(((l_117 = (l_117 ^ (g_124 = ((((g_108 <= g_26) == (((uint8_t)((g_26 && (l_139 = ((l_138 = ((((uint8_t)((((int16_t)(g_124 == ((g_5 != p_17) > (-1))) % (int16_t)p_16) && 0) & 0x31) % (uint8_t)0xC3) & 8) > p_15)) && l_117))) != g_57) - (uint8_t)0) <= l_109)) > g_5) || l_109)))) & (-1)) >= 2) * (uint8_t)l_125)))) | l_109)) != g_56) - (uint32_t)(-1)) << (uint16_t)8))) + { /* block id: 61 */ + return g_56; + } + else + { /* block id: 63 */ + int16_t l_152 = 1; + int32_t l_153 = 0x11538544; + l_153 = (((int16_t)(((p_15 == 0xCE37C20D) && ((p_14 != p_17) == (((uint8_t)(((g_57 | (((int16_t)l_147 << (int16_t)11) && ((int16_t)g_108 >> (int16_t)2))) && (((((uint8_t)g_124 * (uint8_t)g_56) == 0xF4) > l_152) && p_16)) < p_17) * (uint8_t)p_14) < 0U))) && p_17) << (int16_t)g_26) | g_2); + } + if (g_57) + goto lbl_154; + l_138 = ((uint8_t)(l_140 = (g_2 | (((int32_t)((uint16_t)((uint16_t)((int8_t)((int32_t)g_56 - (int32_t)(((l_117 = ((int16_t)((int8_t)((uint32_t)g_108 - (uint32_t)(1 && ((int8_t)(((~((((255U && 4U) >= ((uint8_t)p_14 * (uint8_t)(((int8_t)p_15 >> (int8_t)4) ^ ((uint8_t)(((-(uint8_t)(((int16_t)(p_14 > l_138) >> (int16_t)l_138) == p_14)) || l_117) & l_185) >> (uint8_t)p_15)))) != 0xBF24FAA9) && l_186)) && 65528U) & 0U) << (int8_t)g_56))) * (int8_t)l_147) * (int16_t)1U)) && (-1)) & g_124)) % (int8_t)l_125) >> (uint16_t)6) - (uint16_t)65535U) - (int32_t)0x622BFC7E) ^ 0x20E4BD68))) + (uint8_t)l_125); 
+ return g_124; +} + + +/* ------------------------------------------ */ +/* + * reads : g_26 g_56 g_5 g_57 + * writes: g_26 g_56 g_57 + */ +static const uint8_t func_24(int32_t p_25) +{ /* block id: 4 */ + const uint32_t l_29 = 0xDB88DCFD; + uint32_t l_60 = 0x6A8B5F6E; + int32_t l_80 = (-1); + int32_t l_104 = (-5); + uint16_t l_105 = 65535U; + for (g_26 = (-23); (g_26 == 21); g_26 += 9) + { /* block id: 7 */ + int16_t l_54 = 0xD42F; + int32_t l_65 = 0x9A4A91A3; + uint32_t l_78 = 0U; + if (p_25) + { /* block id: 8 */ + uint32_t l_30 = 1U; + l_30 = l_29; + for (p_25 = 0; (p_25 < 17); p_25 += 9) + { /* block id: 12 */ + uint32_t l_33 = 0U; + uint32_t l_51 = 0U; + if (((l_33 = 1) ^ ((int8_t)0x5C + (int8_t)(+l_30)))) + { /* block id: 14 */ + uint32_t l_52 = 0x0A82B5DF; + int32_t l_53 = 1; + for (l_33 = (-30); (l_33 == 7); l_33++) + { /* block id: 17 */ + l_53 = (l_52 = ((int16_t)((int16_t)(((int8_t)0xEE + (int8_t)0x3F) && ((g_26 && ((((int32_t)(l_30 > g_26) + (int32_t)((int16_t)(0x35D5 < ((uint8_t)0x0B << (uint8_t)2)) + (int16_t)l_29)) > p_25) != l_51)) < g_26)) << (int16_t)6) + (int16_t)0x9BA1)); + if (g_26) + continue; + } + return p_25; + } + else + { /* block id: 23 */ + int32_t l_55 = 0x1FE01AF0; + g_56 = (l_55 = l_54); + return g_56; + } + } + if (p_25) + continue; + p_25 = (((l_30 <= (((0xD6AE4D7D >= l_54) | ((g_57 = g_5) == ((uint16_t)l_29 - (uint16_t)p_25))) == l_60)) >= ((((int16_t)p_25 >> (int16_t)g_56) != p_25) >= l_30)) == l_30); + } + else + { /* block id: 32 */ + int32_t l_77 = 0; + int32_t l_79 = 0xE18B70F7; + l_65 = ((uint8_t)(g_57 = 0x03) << (uint8_t)g_26); + l_80 = (((l_79 = ((l_65 = (0x7C && l_65)) > (((uint8_t)(g_57 = 250U) >> (uint8_t)(5 ^ (0x08 & (((p_25 >= (((uint8_t)(0x59A0 <= ((+(0xB1 | (((uint16_t)((int8_t)l_29 >> (int8_t)p_25) * (uint16_t)g_26) <= l_77))) >= p_25)) * (uint8_t)l_77) != g_56)) ^ l_77) != l_78)))) < g_56))) != 0xF3BEA0A5) == p_25); + } + } + p_25 = (0 | ((((int16_t)(((int8_t)p_25 % (int8_t)((int8_t)(~0x0E) - (int8_t)((int16_t)0x56B3 * (int16_t)((uint16_t)0U << (uint16_t)13)))) || g_5) - (int16_t)(-10)) || (((int16_t)(l_80 = (0 < (((-(uint16_t)(!(((((int8_t)((int8_t)(((int8_t)(((int16_t)((g_57 == p_25) != p_25) * (int16_t)l_29) >= 0x2441043A) - (int8_t)3) && p_25) % (int8_t)255U) - (int8_t)g_26) | l_60) > (-1)) || g_56))) && 0x84107A49) >= l_104))) * (int16_t)65535U) <= 0xE8DF01DD)) > l_105)); + for (g_26 = (-22); (g_26 <= 30); ++g_26) + { /* block id: 45 */ + p_25 = g_56; + } + return p_25; +} + + + + +/* ---------------------------------------- */ +int main (void) +{ + int print_hash_value = 0; + platform_main_begin(); + func_1(); + csmith_sink_ = g_2; + csmith_sink_ = g_3; + csmith_sink_ = g_5; + csmith_sink_ = g_26; + csmith_sink_ = g_56; + csmith_sink_ = g_57; + csmith_sink_ = g_108; + csmith_sink_ = g_112; + csmith_sink_ = g_124; + csmith_sink_ = g_196; + csmith_sink_ = g_208; + csmith_sink_ = g_210; + csmith_sink_ = g_216; + platform_main_end(0,0); + return 0; +} + +/************************ statistics ************************* +XXX max struct depth: 0 +breakdown: + depth: 0, occurrence: 46 +XXX total union variables: 0 + +XXX max expression depth: 40 +breakdown: + depth: 1, occurrence: 33 + depth: 2, occurrence: 6 + depth: 3, occurrence: 1 + depth: 4, occurrence: 1 + depth: 13, occurrence: 1 + depth: 14, occurrence: 1 + depth: 15, occurrence: 1 + depth: 19, occurrence: 1 + depth: 20, occurrence: 1 + depth: 23, occurrence: 1 + depth: 24, occurrence: 1 + depth: 26, occurrence: 1 + depth: 27, occurrence: 1 + depth: 33, occurrence: 1 + depth: 36, 
occurrence: 1 + depth: 40, occurrence: 1 + +XXX total number of pointers: 0 + +XXX times a non-volatile is read: 187 +XXX times a non-volatile is write: 50 +XXX times a volatile is read: 8 +XXX times read thru a pointer: 0 +XXX times a volatile is write: 1 +XXX times written thru a pointer: 0 +XXX times a volatile is available for access: 23 +XXX percentage of non-volatile access: 96.3 + +XXX forward jumps: 0 +XXX backward jumps: 1 + +XXX stmts: 35 +XXX max block depth: 5 +breakdown: + depth: 0, occurrence: 18 + depth: 1, occurrence: 4 + depth: 2, occurrence: 6 + depth: 3, occurrence: 1 + depth: 4, occurrence: 4 + depth: 5, occurrence: 2 + +XXX percentage a fresh-made variable is used: 19.2 +XXX percentage an existing variable is used: 80.8 +********************* end of statistics **********************/ + diff --git a/samples/add-1-float.c b/samples/add-1-float.c new file mode 100644 index 0000000000000000000000000000000000000000..6a6111e9835477f752111435cad46eb2909094fa --- /dev/null +++ b/samples/add-1-float.c @@ -0,0 +1,14 @@ +int main() { + float a = 1; a++; // a={2} + float b = 2; b++; // b={3} + float c; + c += 1; // c=T + + if(c == a){ // c=a={2} + c += 1; // c={3} + a = c; // a={3} + } + + // a={2,3} + return 0; // ret={5,6} +} diff --git a/samples/add-1.c b/samples/add-1.c index fb657e61358d269125a2868582e16d4f8a5bd9a7..c0d717740c1e2fefe43bb95ce73f6041820b117b 100644 --- a/samples/add-1.c +++ b/samples/add-1.c @@ -11,4 +11,9 @@ int test() { // a={2,3} return a + b; // ret={5,6} -} \ No newline at end of file +} + +int main() { + int x = test(); + return 0; +} diff --git a/samples/add-2.c b/samples/add-2.c index 6db9f4d8a2bf9341807039e203d31b78345ba60c..ae9ab5a78429d8a481da531f70cc5af5e32f08fa 100644 --- a/samples/add-2.c +++ b/samples/add-2.c @@ -1,4 +1,4 @@ -int test() { +int main() { int a = 1; a++; // a={2} int b = 2; b++; // b={3} int c = 3; c++; // c={4} @@ -10,4 +10,4 @@ int test() { // a={2} return a + b; // ret={5} -} \ No newline at end of file +} diff --git a/samples/add-3.c b/samples/add-3.c index a2ef595ce06adfc15692a1a7a2dcd8d6d7ae1758..47b96d22d8425a71fea8358b33645469ec4fd991 100644 --- a/samples/add-3.c +++ b/samples/add-3.c @@ -1,4 +1,4 @@ -int test() { +int main() { int a = 1; a++; // a={2} int b = 2; b++; // b={3} int c = 3; c++; // c={4} @@ -10,4 +10,4 @@ int test() { // a={5} return a + b; // ret={8} -} \ No newline at end of file +} diff --git a/samples/add-4.c b/samples/add-4.c index b781031a859d3c9c1cf817a58fcfed046a642c94..c9c86d95546054f77bf1624056c8a9bf5761c10a 100644 --- a/samples/add-4.c +++ b/samples/add-4.c @@ -1,4 +1,4 @@ -int test() { +int main() { int a = 1; a++; // a={2} int b = 2; b++; // b={3} int c = 3; c++; // c={4} @@ -10,4 +10,4 @@ int test() { // a={12} return a + b; // ret={5} -} \ No newline at end of file +} diff --git a/samples/basic_function.c b/samples/basic_function.c new file mode 100644 index 0000000000000000000000000000000000000000..d4544b7c23593691ed808b0d57b51a0b52434bac --- /dev/null +++ b/samples/basic_function.c @@ -0,0 +1,13 @@ +int incr(int a) { + a++; + return a; +} + +int main() { + int a = 1; + int b = 0; + a++; + b++; + b = incr(a); + return a + b; +} \ No newline at end of file diff --git a/samples/example-1.c b/samples/example-1.c new file mode 100644 index 0000000000000000000000000000000000000000..05ed2993ea02b806e77c32f3097941f7d1a3f801 --- /dev/null +++ b/samples/example-1.c @@ -0,0 +1,20 @@ +volatile int x1,x2,x3; +volatile int condition; + + +void P() { + if (condition) { + x1 = x1 + x2 + 1; + x3 = x3 + 1; + 
P(); + x1 = x1 - x2; + } +} + + +int main() { + x2 = x1; + x3 = 0; + P(); + x1 = 1 - x2 - x3; +} \ No newline at end of file diff --git a/samples/for-loop-1.c b/samples/for-loop-1.c new file mode 100644 index 0000000000000000000000000000000000000000..a6a1022df9d1ae19b25ba13caa9918c1726b93fd --- /dev/null +++ b/samples/for-loop-1.c @@ -0,0 +1,33 @@ + + +int x_arr[100]; +int y_arr[100]; + +int res[100]; +int *res_ptr; + +int *x_ptr; +int *y_ptr; + +void access() { + int x = *x_ptr; + int y = *y_ptr; + *res_ptr = x + y; + res_ptr++; +} + + +int main() { + int i; + x_ptr = &x_arr[0]; + y_ptr = &y_arr[0]; + res_ptr = &res[0]; + + for (i=0; i<100; i++) { + access(); + x_ptr++; + y_ptr++; + } +} + +// (x_ptr = 8 * i + x_arr[0]) && (Y_ptr = 4 * i + y_arr[0]) \ No newline at end of file diff --git a/samples/for.c b/samples/for.c index 013a51af9504a2c01c019ed8652d17b1ad380e9c..2e19bdaeae5f0536ed22f81b5535aefe2fa63cc3 100644 --- a/samples/for.c +++ b/samples/for.c @@ -1,16 +1,14 @@ -#include -#include // test program: // simple loop +int xex(int b) { + return b + 9; +} -int main(int argc, char const *argv[]) { - int x = 42; - //x += 7; - int y = 2; - - for(int i=0; i + +int a(int in) { + printf("In a()\n"); + int a_val = in - 1; + if (in == 0) { + a_val = 4; + return a_val; + } + a_val = a_val - 3; + if (a_val > 2) { + return 2; + } else { + if ( a_val == in) { + return 0; + } + return a_val; + } +} + +int b(int in) { + int b_val = in +3; + return b_val; +} + +int main(int argc, char const *argv[]) { + int x = a(4); + int y = b(x); + return y+1; +} diff --git a/samples/func-test-2.c b/samples/func-test-2.c new file mode 100644 index 0000000000000000000000000000000000000000..4cfd628ca63db1e01e6de1546421fb42a9ddd267 --- /dev/null +++ b/samples/func-test-2.c @@ -0,0 +1,10 @@ +int a(int input) { + return 17; +} + +int main() { + int x = 10; + x++; + int y = a(x); + return y; +} diff --git a/samples/func-test-3.c b/samples/func-test-3.c new file mode 100644 index 0000000000000000000000000000000000000000..5f5e97be405ec29e1cf8df26b29c25ec731853fa --- /dev/null +++ b/samples/func-test-3.c @@ -0,0 +1,20 @@ +int b(int x, int y) { + if (y == 0) { + return x; + } + return y - x; +} + +int a(int x, int y, int z) { + int l = x + y; + int m = b(3, z); + return l - m; +} + +int main(int argc, char const* argv[]) { + int l = 4; + int n = 2; + int x = a(n, l, 1); // a(2,4,1) = 2+4-b(3,1) = 6-(1-3) = 8 + int y = b(x, l); // b(8, 4) = 4-8 = -4 + return x + y; // 8-4 = 4 +} diff --git a/samples/func-test-4.c b/samples/func-test-4.c new file mode 100644 index 0000000000000000000000000000000000000000..232885ccaef43cacd113b377c0e3442d788906b4 --- /dev/null +++ b/samples/func-test-4.c @@ -0,0 +1,11 @@ +int a(int b, int c) { + if (b > c) { + return c; + } + return b; +} + +int main(int argc, char const* argv[]) { + int x = a(3, 4); + return 0; +} diff --git a/samples/func-test-5.c b/samples/func-test-5.c new file mode 100644 index 0000000000000000000000000000000000000000..b1a3e780d6a2335411a86fb40b05e70c9bbbb562 --- /dev/null +++ b/samples/func-test-5.c @@ -0,0 +1,12 @@ +int a(int in) { + in++; + return in; +} + +int main() { + int x_1 = a(0); // 1 + int x_2 = a(1); // 2 + int x_3 = a(2); // 3 + int x_4 = a(3); // 4 + return x_1 + x_2 + x_3 + x_4; // 10 +} diff --git a/samples/func-test-for.c b/samples/func-test-for.c new file mode 100644 index 0000000000000000000000000000000000000000..b79fad9cdea8058676a1f0d5e48e17ffc277b185 --- /dev/null +++ b/samples/func-test-for.c @@ -0,0 +1,26 @@ +int a(int in) { + in++; + return in; +} 
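+// a(i) returns i + 1, so main below computes x = 1 + a(0) + a(1) + a(2) + a(3) = 11.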
+ +int main() { + int x = 0; + x++; + for (int i = 0; i < 4; i++) { + switch(i) { + case 0: + x += a(i); + break; + case 1: + x += a(i); + break; + case 2: + x += a(i); + break; + case 3: + x += a(i); + break; + } + } + return x; +} diff --git a/samples/func-test-rec-endless.c b/samples/func-test-rec-endless.c new file mode 100644 index 0000000000000000000000000000000000000000..5d7cd6eca70d4934acf0ec9c19618836f434bdcb --- /dev/null +++ b/samples/func-test-rec-endless.c @@ -0,0 +1,8 @@ +int a(int in) { + return a(in+1); +} + +int main() { + int x = a(5); + return x; +} diff --git a/samples/func-test-rec.c b/samples/func-test-rec.c new file mode 100644 index 0000000000000000000000000000000000000000..9ca10c7642301b9170d39863e88bf4f5cbb93a2b --- /dev/null +++ b/samples/func-test-rec.c @@ -0,0 +1,15 @@ +#include + +int a(int in) { + if (in == 0) { + return in; + } + int y = a(in - 1); + return y; +} + + +int main(int argc, char const *argv[]) { + int x = a(3); + return x; +} diff --git a/samples/func-test-switch.c b/samples/func-test-switch.c new file mode 100644 index 0000000000000000000000000000000000000000..b79fad9cdea8058676a1f0d5e48e17ffc277b185 --- /dev/null +++ b/samples/func-test-switch.c @@ -0,0 +1,26 @@ +int a(int in) { + in++; + return in; +} + +int main() { + int x = 0; + x++; + for (int i = 0; i < 4; i++) { + switch(i) { + case 0: + x += a(i); + break; + case 1: + x += a(i); + break; + case 2: + x += a(i); + break; + case 3: + x += a(i); + break; + } + } + return x; +} diff --git a/samples/gcd.c b/samples/gcd.c index c9623ccb7792bac1566dc42242d3486206923a3d..69ec38623330050c96b7c0ed4229a629571a52b5 100644 --- a/samples/gcd.c +++ b/samples/gcd.c @@ -11,3 +11,8 @@ int gcd() { // expected solution 89 return a; } + +int main() { + int x = gcd(); + return x; +} diff --git a/samples/goto.c b/samples/goto.c index f039aa435bc2cb0ee07511ac47cf86ff1e6d7fa2..9aa14d403f71cfebb833693423b6ffd22c60cede 100644 --- a/samples/goto.c +++ b/samples/goto.c @@ -1,8 +1,8 @@ -int test(){ +int main(){ int i = 0; loop: i++; if(i<10) goto loop; return i; -} \ No newline at end of file +} diff --git a/src/abstract_state.h b/src/abstract_state.h new file mode 100644 index 0000000000000000000000000000000000000000..b6abc2b23ca4b06db2b3e82db1e146f6710dccd4 --- /dev/null +++ b/src/abstract_state.h @@ -0,0 +1,74 @@ +#pragma once + +#include "llvm/IR/CFG.h" + +namespace pcpo { + +class AbstractStateDummy { +public: + // This has to initialise the state to bottom. + AbstractStateDummy() = default; + + // Creates a copy of the state. Using the default copy-constructor should be fine here, but if + // some members do something weird you maybe want to implement this as + // AbstractState().merge(state) + AbstractStateDummy(AbstractStateDummy const& state) = default; + + // Initialise the state to the incoming state of the function. This should do something like + // assuming the parameters can be anything. + explicit AbstractStateDummy(llvm::Function const& f) {} + + // Initialise the state of a function call with parameters of the caller. + // This is the "enter" function as described in "Compiler Design: Analysis and Transformation" + explicit AbstractStateDummy(llvm::Function const* callee_func, AbstractStateDummy const& state, + llvm::CallInst const* call) {} + + // Apply functions apply the changes needed to reflect executing the instructions in the basic block. 
Before + // this operation is called, the state is the one upon entering bb, afterwards it should be (an + // upper bound of) the state leaving the basic block. + // predecessors contains the outgoing state for all the predecessors, in the same order as they + // are listed in llvm::predecessors(bb). + + // Applies instructions within the PHI node, needed for merging + void applyPHINode(llvm::BasicBlock const& bb, std::vector const& pred_values, + llvm::Instruction const& inst) {}; + + // This is the "combine" function as described in "Compiler Design: Analysis and Transformation" + void applyCallInst(llvm::Instruction const& inst, llvm::BasicBlock const* end_block, + AbstractStateDummy const& callee_state) {}; + + // Evaluates return instructions, needed for the main function and the debug output + void applyReturnInst(llvm::Instruction const& inst) {}; + + // Handles all cases different from the three above + void applyDefault(llvm::Instruction const& inst) {}; + + // This 'merges' two states, which is the operation we do fixpoint iteration over. Currently, + // there are three possibilities for op: + // 1. UPPER_BOUND: This has to return some upper bound of itself and other, with more precise + // bounds being preferred. + // 2. WIDEN: Same as UPPER_BOUND, but this operation should sacrifice precision to converge + // quickly. Returning T would be fine, though maybe not optimal. For example for intervals, + // an implementation could ensure to double the size of the interval. + // 3. NARROW: Return a value between the intersection of the state and other, and the + // state. In pseudocode: + // intersect(state, other) <= narrow(state, other) <= state + // For all of the above, this operation returns whether the state changed as a result. + // IMPORTANT: The simple fixpoint algorithm only performs UPPER_BOUND, so you do not need to + // implement the others if you just use that one. (The more advanced algorithm in + // fixpoint_widening.cpp uses all three operations.) + bool merge(Merge_op::Type op, AbstractStateDummy const& other) { return false; }; + + // Restrict the set of values to the one that allows 'from' to branch towards + // 'towards'. Starting with the state when exiting from, this should compute (an upper bound of) + // the possible values that would reach the block towards. Doing nothing thus is a valid + // implementation. + void branch(llvm::BasicBlock const& from, llvm::BasicBlock const& towards) {}; + + // Functions to generate the debug output. printIncoming should output the state as of entering + // the basic block, printOutcoming the state when leaving it. 
+ void printIncoming(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const {}; + void printOutgoing(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const {}; +}; + +} diff --git a/src/callstring.cpp b/src/callstring.cpp new file mode 100644 index 0000000000000000000000000000000000000000..92f36f88dad0ec0c0b73ec77ba127180284f4ac5 --- /dev/null +++ b/src/callstring.cpp @@ -0,0 +1,14 @@ +#include "callstring.h" +#include "simple_interval.h" + +namespace pcpo { + + void InterpretCall(llvm::CallInst const* call, std::vector& operands) { + dbgs(3) << "Call performed\nArguments:\n"; + dbgs(3) << call->getArgOperand(0)->getName() << " "; + dbgs(3) << operands[0] << "\n"; + + } + +} + diff --git a/src/fixpoint.cpp b/src/fixpoint.cpp index f5c40dd587e3cff2aa50b6a382b3350f61a49445..5abe44722613c7de42601c7f4e48e6c0b0ef96d5 100644 --- a/src/fixpoint.cpp +++ b/src/fixpoint.cpp @@ -5,190 +5,330 @@ #include "llvm/IR/CFG.h" #include "llvm/IR/Module.h" -#include "llvm/IR/Dominators.h" -#include "llvm/Analysis/LoopInfo.h" #include "global.h" -#include "fixpoint_widening.cpp" + #include "value_set.h" #include "simple_interval.h" +#include "normalized_conjunction.h" +#include "linear_subspace.h" + +#include "fixpoint_widening.cpp" +#include "hash_utils.h" + +#include "llvm/ADT/PostOrderIterator.h" namespace pcpo { +using namespace llvm; +using std::vector; +using std::pair; +using std::unordered_map; + static llvm::RegisterPass Y("painpass", "AbstractInterpretation Pass"); char AbstractInterpretationPass::ID; int debug_level = DEBUG_LEVEL; // from global.hpp -class AbstractStateDummy { -public: - // This has to initialise the state to bottom. - AbstractStateDummy() = default; - - // Creates a copy of the state. Using the default copy-constructor should be fine here, but if - // some members do something weird you maybe want to implement this as - // AbstractState().merge(state) - AbstractStateDummy(AbstractStateDummy const& state) = default; - - // Initialise the state to the incoming state of the function. This should do something like - // assuming the parameters can be anything. - explicit AbstractStateDummy(llvm::Function const& f) {} - - // Applies the changes needed to reflect executing the instructions in the basic block. Before - // this operation is called, the state is the one upon entering bb, afterwards it should be (an - // upper bound of) the state leaving the basic block. - // predecessors contains the outgoing state for all the predecessors, in the same order as they - // are listed in llvm::predecessors(bb). - void apply(llvm::BasicBlock const& bb, std::vector const& predecessors) {}; - - // This 'merges' two states, which is the operation we do fixpoint iteration over. Currently, - // there are three possibilities for op: - // 1. UPPER_BOUND: This has to return some upper bound of itself and other, with more precise - // bounds being preferred. - // 2. WIDEN: Same as UPPER_BOUND, but this operation should sacrifice precision to converge - // quickly. Returning T would be fine, though maybe not optimal. For example for intervals, - // an implementation could ensure to double the size of the interval. - // 3. NARROW: Return a value between the intersection of the state and other, and the - // state. In pseudocode: - // intersect(state, other) <= narrow(state, other) <= state - // For all of the above, this operation returns whether the state changed as a result. 
- // IMPORTANT: The simple fixpoint algorithm only performs UPPER_BOUND, so you do not need to - // implement the others if you just use that one. (The more advanced algorithm in - // fixpoint_widening.cpp uses all three operations.) - bool merge(Merge_op::Type op, AbstractStateDummy const& other) { return false; }; - - // Restrict the set of values to the one that allows 'from' to branch towards - // 'towards'. Starting with the state when exiting from, this should compute (an upper bound of) - // the possible values that would reach the block towards. Doing nothing thus is a valid - // implementation. - void branch(llvm::BasicBlock const& from, llvm::BasicBlock const& towards) {}; - - // Functions to generate the debug output. printIncoming should output the state as of entering - // the basic block, printOutcoming the state when leaving it. - void printIncoming(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const {}; - void printOutgoing(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const {}; + +using Callstring = vector; +using NodeKey = pair; + +// MARK: - To String + +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, BasicBlock const& basic_block) { + os << "%"; + if (llvm::Function const* f = basic_block.getParent()) { + os << f->getName() << "."; + } + return os << basic_block.getName(); +} + +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, Callstring const& callstring) { + for (auto call : callstring) { + os << call->getName(); + if (call != callstring.back()) { + os << " -> "; + } + } + return os; +} + +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, NodeKey const& key) { + return os << "[" << *key.second << "," << key.first << "]"; +} + + +template +struct Node { + BasicBlock const* basic_block; + /// Function calls the lead to this basic block. The last element is always the current function. + Callstring callstring; + AbstractState state = {}; + bool update_scheduled = false; // Whether the node is already in the worklist + + /// Check wether this basic block is the entry block of its function. + bool isEntry() const { + return basic_block == &function()->getEntryBlock(); + } + + /// Function in which this basic block is located. + Function const* function() const { + return callstring.back(); + } }; -// Run the simple fixpoint algorithm. AbstractState should implement the interface documented in -// AbstractStateDummy (no need to subclass or any of that, just implement the methods with the right -// signatures and take care to fulfil the contracts outlines above). 
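+/// Builds the call context under which `function` is analysed: at most `max_length`
+/// entries of the caller's callstring are kept, then `function` is appended.
+/// For example, with max_length == 2 (functions written by name for readability):
+///   callstring_for(a, {main},    2)  ->  {main, a}
+///   callstring_for(b, {main, a}, 2)  ->  {main, a, b}
+/// If the incoming callstring is already longer than max_length, only its first
+/// max_length entries are returned (without appending the callee), which keeps the
+/// number of distinct contexts finite.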
+Callstring callstring_for(Function const* function, Callstring const& callstring, int max_length) { + Callstring new_callstring; + for (auto call: callstring) { + if (max_length-- > 0) { + new_callstring.push_back(call); + } else { + return new_callstring; + } + } + new_callstring.push_back(function); + return new_callstring; +} + + +template +vector*> register_function(llvm::Function const* function, Callstring const& callstring, int callstack_depth, unordered_map> &nodes) { + Callstring new_callstring = callstring_for(function, callstring, callstack_depth); + vector*> inserted_nodes; + + for (po_iterator I = po_begin(&function->getEntryBlock()), + IE = po_end(&function->getEntryBlock()); + I != IE; ++I) { + + BasicBlock const* basic_block = *I; + + dbgs(1) << " Found basic block: " << basic_block->getName() << '\n'; + NodeKey key = {new_callstring, basic_block}; + Node node = {basic_block, new_callstring}; + if (node.isEntry()) { + node.state = AbstractState {*node.function()}; + } + inserted_nodes.push_back(&nodes[key]); + nodes[key] = node; + } + return inserted_nodes; +} + +template + void add_to_worklist(vector*> &nodes, vector*> &worklist) { + for (Node* node : nodes) { + node->update_scheduled = true; + worklist.push_back(node); + } +} + + +// Run the simple fixpoint algorithm with callstrings. AbstractState should implement the interface +// documented in AbstractStateDummy (no need to subclass or any of that, just implement the methods +// with the right signatures and take care to fulfil the contracts outlines above). // Note that a lot of this code is duplicated in executeFixpointAlgorithmWidening in // fixpoint_widening.cpp, so if you fix any bugs in here, they probably should be fixed there as // well. // Tip: Look at a diff of fixpoint.cpp and fixpoint_widening.cpp with a visual diff tool (I // recommend Meld.) -template -void executeFixpointAlgorithm(llvm::Module const& M) { - constexpr int iterations_max = 1000; +template +void executeFixpointAlgorithm(Module const& M) { + using Node = Node; // A node in the control flow graph, i.e. a basic block. Here, we need a bit of additional data // per node to execute the fixpoint algorithm. - struct Node { - int id; - llvm::BasicBlock const* bb; - AbstractState state; - bool update_scheduled = false; // Whether the node is already in the worklist - - // If this is set, the algorithm will add the initial values from the parameters of the - // function to the incoming values, which is the correct thing to do for initial basic - // blocks. - llvm::Function const* func_entry = nullptr; - }; - - std::vector nodes; - std::unordered_map nodeIdMap; // Maps basic blocks to the ids of their corresponding nodes - std::vector worklist; // Contains the ids of nodes that need to be processed - // TODO: Check what this does for release clang, probably write out a warning - dbgs(1) << "Initialising fixpoint algorithm, collecting basic blocks\n"; + unordered_map nodes; + vector worklist; - for (llvm::Function const& f: M.functions()) { - // Check for external (i.e. declared but not defined) functions - if (f.empty()) { - dbgs(1) << " Function " << f.getName() << " is external, skipping..."; - continue; - } - - // Register basic blocks - for (llvm::BasicBlock const& bb: f) { - dbgs(1) << " Found basic block " << bb.getName() << '\n'; + // We only consider the main function in the beginning. If no main exists, nothing is evaluated! 
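One detail worth noting about the lookup that follows: llvm::Module::getFunction returns a null pointer if the module does not define a function with the given name. A defensive variant of this step (a sketch, not part of this patch) could bail out before any blocks are registered:

    llvm::Function const* main_func = M.getFunction("main");
    if (main_func == nullptr || main_func->empty()) {
        dbgs(0) << "No definition of main() found, nothing to analyse\n";
        return;
    }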
+ Function const* main_func = M.getFunction("main"); - Node node; - node.id = nodes.size(); // Assign new id - node.bb = &bb; - // node.state is default initialised (to bottom) - - nodeIdMap[node.bb] = node.id; - nodes.push_back(node); - } + // TODO: Check what this does for release clang, probably write out a warning + dbgs(1) << "Initialising fixpoint algorithm, collecting basic blocks\n"; - // Push the initial block into the worklist - int entry_id = nodeIdMap.at(&f.getEntryBlock()); - worklist.push_back(entry_id); - nodes[entry_id].update_scheduled = true; - nodes[entry_id].func_entry = &f; - } + // Register basic blocks of the main function + auto main_basic_blocks = register_function(main_func, {}, callstack_depth, nodes); + add_to_worklist(main_basic_blocks, worklist); dbgs(1) << "\nWorklist initialised with " << worklist.size() << (worklist.size() != 1 ? " entries" : " entry") << ". Starting fixpoint iteration...\n"; for (int iter = 0; !worklist.empty() and iter < iterations_max; ++iter) { - Node& node = nodes[worklist.back()]; + Node& node = *worklist.back(); worklist.pop_back(); node.update_scheduled = false; - dbgs(1) << "\nIteration " << iter << ", considering basic block " << node.bb->getName() << '\n'; + dbgs(1) << "\nIteration " << iter << ", considering basic block " + << *node.basic_block << " with callstring " + << node.callstring << '\n'; AbstractState state_new; // Set to bottom - if (node.func_entry) { + if (node.isEntry()) { dbgs(1) << " Merging function parameters, is entry block\n"; - AbstractState state_entry {*node.func_entry}; - state_new.merge(Merge_op::UPPER_BOUND, state_entry); + // if it is the entry node, then its state should be top + state_new.merge(merge_op, node.state); + state_new.isBottom = false; } - dbgs(1) << " Merge of " << llvm::pred_size(node.bb) - << (llvm::pred_size(node.bb) != 1 ? " predecessors.\n" : " predecessor.\n"); + dbgs(1) << " Merge of " << pred_size(node.basic_block) + << (pred_size(node.basic_block) != 1 ? " predecessors.\n" : " predecessor.\n"); // Collect the predecessors - std::vector predecessors; - for (llvm::BasicBlock const* bb: llvm::predecessors(node.bb)) { - dbgs(3) << " Merging basic block " << bb->getName() << '\n'; + vector predecessors; + for (BasicBlock const* basic_block: llvm::predecessors(node.basic_block)) { + dbgs(3) << " Merging basic block " << *basic_block << '\n'; - AbstractState state_branched {nodes[nodeIdMap[bb]].state}; - state_branched.branch(*bb, *node.bb); - state_new.merge(Merge_op::UPPER_BOUND, state_branched); + AbstractState state_branched {nodes[{{node.callstring}, basic_block}].state}; + state_branched.branch(*basic_block, *node.basic_block); + state_new.merge(merge_op, state_branched); predecessors.push_back(state_branched); } - dbgs(2) << " Relevant incoming state is:\n"; state_new.printIncoming(*node.bb, dbgs(2), 4); + dbgs(2) << " Relevant incoming state is:\n"; state_new.printIncoming(*node.basic_block, dbgs(2), 4); // Apply the basic block dbgs(3) << " Applying basic block\n"; - state_new.apply(*node.bb, predecessors); + + if (state_new.isBottom) { + dbgs(3) << " Basic block is unreachable, everything is bottom\n"; + } else { + // Applies all instrucions of a basic block + for (Instruction const& inst: *node.basic_block) { + + // Handles return instructions + if (isa(&inst)) { + state_new.applyReturnInst(inst); + } + + // If the result of the instruction is not used, there is no reason to compute + // it. (There are no side-effects in LLVM IR. 
(I hope.)) + if (inst.use_empty()) { + // Except for call instructions, we still want to get that information + if (not isa(&inst)) { + dbgs(3) << " Empty use of instruction, " << inst.getOpcodeName() << " skipping...\n"; + continue; + } + } + + // Handles merging points + if (isa(&inst)) { + + state_new.applyPHINode(*node.basic_block, predecessors, inst); + + // Handles function calls + } else if (CallInst const* call = dyn_cast(&inst)) { + + // Checks if an input parameter for the callee is bottom. If so, + // then skip the calculation of the call instruction for now + if (state_new.checkOperandsForBottom(inst)) continue; + + Function const* callee_func = call->getCalledFunction(); + + // Checks for functions, such as printf and skips them + if (callee_func->empty()) { + dbgs(3) << " Function " << callee_func->getName() << " is external, skipping...\n"; + continue; + } + + Callstring new_callstring = callstring_for(callee_func, node.callstring, callstack_depth); + + NodeKey callee_element = {new_callstring, &callee_func->getEntryBlock()}; + vector callee_basic_blocks; + bool changed; + + // Checks whether a node with key [%callee entry block, %caller basic block], + // i.e. an entry block with callstring of caller basic block, exists. + // If not, all nodes with their corrosponding keys are initilized for the callee function. + if (nodes.find(callee_element) == nodes.end()) { + // Check if abstract_state of call.bb is bottom or not + dbgs(3) << " No information regarding function call %" << call->getCalledFunction()->getName() << "\n"; + + callee_basic_blocks = register_function(callee_func, node.callstring, callstack_depth, nodes); + + nodes[callee_element].state = AbstractState{ callee_func, state_new, call }; + changed = true; + } else { + //update callee + AbstractState before = nodes[callee_element].state; + + // Collect all basic blocks of callee_func + for (po_iterator I = po_begin(&callee_func->getEntryBlock()), + IE = po_end(&callee_func->getEntryBlock()); + I != IE; ++I) { + BasicBlock const* basic_block = *I; + NodeKey key = {new_callstring, basic_block}; + callee_basic_blocks.push_back(&nodes[key]); + } + + AbstractState state_update{ callee_func, state_new, call }; + changed = nodes[callee_element].state.merge(merge_op, state_update); + } + + //Getting the last block + BasicBlock const* end_block = &*std::prev(callee_func->end()); + NodeKey end_element = {new_callstring, end_block}; + state_new.applyCallInst(inst, end_block, nodes[end_element].state); + + // If input parameters have changed, we want to interpret the function once again + // and reevaluate the nodes of possible callers. + if (changed) { + for (auto& [key, value]: nodes) { + if (key.second == node.basic_block and not value.update_scheduled) { + dbgs(3) << " Adding possible caller " << key << " to worklist\n"; + worklist.push_back(&value); + value.update_scheduled = true; + } + } + + // Checks if the key of the callee functions entry node is already on the worklist, + // this is necessary for recursions. 
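+                            // Example: for a recursive sample such as samples/func-test-rec.c this very
+                            // call can change the entry state of the function currently being analysed;
+                            // unless its blocks are re-queued here, the new entry state would never be
+                            // propagated through the callee's body.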
+ for (Node* elem: callee_basic_blocks) { + if (!elem->update_scheduled) { + worklist.push_back(elem); + elem->update_scheduled = true; + + dbgs(3) << " Adding callee " << *elem->basic_block << " " << elem->callstring << " to worklist\n"; + } else { + dbgs(3) << " Callee already on worklist, nothing to add...\n"; + } + } + } + } else { + if (state_new.checkOperandsForBottom(inst)) continue; + state_new.applyDefault(inst); + } + } + } // Merge the state back into the node dbgs(3) << " Merging with stored state\n"; - bool changed = node.state.merge(Merge_op::UPPER_BOUND, state_new); + bool changed = node.state.merge(merge_op, state_new); - dbgs(2) << " Outgoing state is:\n"; state_new.printOutgoing(*node.bb, dbgs(2), 4); + dbgs(2) << " Outgoing state " << (changed ? "changed" : "didn't change") << ":\n"; state_new.printOutgoing(*node.basic_block, dbgs(2), 4); // No changes, so no need to do anything else if (not changed) continue; - dbgs(2) << " State changed, notifying " << llvm::succ_size(node.bb) - << (llvm::succ_size(node.bb) != 1 ? " successors\n" : " successor\n"); + dbgs(2) << " State changed, notifying " << succ_size(node.basic_block) + << (succ_size(node.basic_block) != 1 ? " successors\n" : " successor\n"); // Something changed and we will need to update the successors - for (llvm::BasicBlock const* succ_bb: llvm::successors(node.bb)) { - Node& succ = nodes[nodeIdMap[succ_bb]]; + for (BasicBlock const* succ_bb: successors(node.basic_block)) { + NodeKey succ_key = {{node.callstring}, succ_bb}; + Node& succ = nodes[succ_key]; if (not succ.update_scheduled) { - worklist.push_back(succ.id); + worklist.push_back(&succ); succ.update_scheduled = true; - dbgs(3) << " Adding " << succ_bb->getName() << " to worklist\n"; + dbgs(3) << " Adding " << succ_key << " to worklist\n"; } } } @@ -196,30 +336,33 @@ void executeFixpointAlgorithm(llvm::Module const& M) { if (!worklist.empty()) { dbgs(0) << "Iteration terminated due to exceeding loop count.\n"; } - + // Output the final result dbgs(0) << "\nFinal result:\n"; - for (Node const& i: nodes) { - dbgs(0) << i.bb->getName() << ":\n"; - i.state.printOutgoing(*i.bb, dbgs(0), 2); + for (auto const& [key, node]: nodes) { + dbgs(0) << key << ":\n"; + node.state.printOutgoing(*node.basic_block, dbgs(0), 2); } + } bool AbstractInterpretationPass::runOnModule(llvm::Module& M) { using AbstractState = AbstractStateValueSet; - // Use either the standard fixpoint algorithm or the version with widening - //executeFixpointAlgorithm (M); - executeFixpointAlgorithmWidening(M); +// Use either the standard fixpoint algorithm or the version with widening +// executeFixpointAlgorithm(M); +// executeFixpointAlgorithm(M); + executeFixpointAlgorithm(M); +// executeFixpointAlgorithmWidening(M); // We never change anything return false; } - void AbstractInterpretationPass::getAnalysisUsage(llvm::AnalysisUsage& info) const { info.setPreservesAll(); } + } /* end of namespace pcpo */ diff --git a/src/fixpoint_widening.cpp b/src/fixpoint_widening.cpp index e9e65fb7787d589c7c30e42ef0f2131bcd6f2226..175d5ec65fc7b28a5aceb6734de50a7b1075e6b9 100644 --- a/src/fixpoint_widening.cpp +++ b/src/fixpoint_widening.cpp @@ -1,4 +1,3 @@ - #include #include @@ -8,105 +7,104 @@ #include "llvm/Analysis/LoopInfo.h" #include "global.h" +#include "general.h" #include "value_set.h" #include "simple_interval.h" namespace pcpo { -// Run the fixpoint algorithm using widening and narrowing. Note that a lot of code in here is -// duplicated from executeFixpointAlgorithm. 
If you just want to understand the basic fixpoint -// iteration, you should take a look at that instead. -// The interface for AbstractState is the same as for the simple fixpoint (documented in -// AbstractStateDummy), except that is needs to support the merge operations WIDEN and NARROW, as -// you can probably guess. +// Run the simple fixpoint algorithm with callstrings. AbstractState should implement the interface +// documented in AbstractStateDummy (no need to subclass or any of that, just implement the methods +// with the right signatures and take care to fulfil the contracts outlines above). +// Note that a lot of this code is duplicated in executeFixpointAlgorithmWidening in +// fixpoint_widening.cpp, so if you fix any bugs in here, they probably should be fixed there as +// well. // Tip: Look at a diff of fixpoint.cpp and fixpoint_widening.cpp with a visual diff tool (I // recommend Meld.) template void executeFixpointAlgorithmWidening(llvm::Module& M) { constexpr int iterations_max = 1000; - constexpr int widen_after = 2; // Number of iteration after which we switch to widening. + constexpr int widen_after = 2; // A node in the control flow graph, i.e. a basic block. Here, we need a bit of additional data // per node to execute the fixpoint algorithm. struct Node { - int id; - llvm::BasicBlock* bb; + llvm::BasicBlock const* bb; + llvm::BasicBlock const* callstring; AbstractState state; bool update_scheduled = false; // Whether the node is already in the worklist // If this is set, the algorithm will add the initial values from the parameters of the // function to the incoming values, which is the correct thing to do for initial basic // blocks. - llvm::Function* func_entry = nullptr; + llvm::Function const* func_entry = nullptr; - bool should_widen = false; // Whether we want to widen at this node - int change_count = 0; // How often has node changed during iterations + bool should_widen = false; // Whether we want to widen at this node + int change_count = 0; // How often has node changed during iterations }; - std::vector nodes; - std::unordered_map nodeIdMap; // Maps basic blocks to the ids of their corresponding nodes - std::vector worklist; // Contains the ids of nodes that need to be processed + std::unordered_map nodes; + std::vector worklist; // Contains the tuples of basic block and callstring, which is the key of a node, that need to be processed bool phase_narrowing = false; // If this is set, we are in the narrowing phase of the fixpoint algorithm + // We only consider the main function in the beginning. If no main exists, nothing is evaluated! + llvm::Function* main_func = M.getFunction("main"); + + //creating dummy block for callstrings of the main block, since the main function is not called from another function + llvm::BasicBlock const* dummy_block = llvm::BasicBlock::Create(M.getContext(), "dummy"); + + // TODO: Check what this does for release clang, probably write out a warning dbgs(1) << "Initialising fixpoint algorithm, collecting basic blocks\n"; // Push dummy element indicating the end of the widening phase of the fixpoint algorithm. As the // worklist is processed in a LIFO order, this will be the last element coming out, indicating // that the worklist is empty. Once that happens, we have obtained a valid solution (using // widening) and can start to apply narrowing. - worklist.push_back(-1); - - for (llvm::Function& f: M.functions()) { - // Check for external (i.e. 
declared but not defined) functions - if (f.empty()) { - dbgs(1) << " Function " << f.getName() << " is external, skipping..."; - continue; - } + worklist.push_back(std::make_tuple(dummy_block, dummy_block)); - // Register basic blocks - for (llvm::BasicBlock& bb: f) { - dbgs(1) << " Found basic block " << bb.getName() << '\n'; + // Register basic blocks + for (llvm::BasicBlock const& bb: *main_func) { + dbgs(1) << " Found basic block main." << bb.getName() << '\n'; - Node node; - node.id = nodes.size(); // Assign new id - node.bb = &bb; - // node.state is default initialised (to bottom) + Node node; + node.bb = &bb; + node.callstring = dummy_block; + // node.state is default initialised (to bottom) - nodeIdMap[node.bb] = node.id; - nodes.push_back(node); - } - - // Gather information about loops in the function. (We only want to widen a single node for - // each loop, as that is enough to guarantee fast termination.) - llvm::LoopInfoBase loopInfoBase; - loopInfoBase.analyze(llvm::DominatorTree {f}); - for (llvm::Loop* loop: loopInfoBase) { - // We want to widen only the conditions of the loops - nodes[nodeIdMap.at(loop->getHeader())].should_widen = true; - dbgs(1) << " Enabling widening for basic block " << loop->getHeader()->getName() << '\n'; - } + // nodes of main block have the callstring of a dummy block + nodes[std::make_tuple(&bb, dummy_block)] = node; + } - // Push the initial block into the worklist - int entry_id = nodeIdMap.at(&f.getEntryBlock()); - worklist.push_back(entry_id); - nodes[entry_id].update_scheduled = true; - nodes[entry_id].func_entry = &f; + // Gather information about loops in the function. (We only want to widen a single node for + // each loop, as that is enough to guarantee fast termination.) + llvm::LoopInfoBase loopInfoBase; + loopInfoBase.analyze(llvm::DominatorTree{ *main_func }); + for (llvm::Loop* loop : loopInfoBase) { + // We want to widen only the conditions of the loops + nodes[std::make_tuple(loop->getHeader(), dummy_block)].should_widen = true; + dbgs(1) << " Enabling widening for basic block " << loop->getHeader()->getName() << '\n'; } + // Push the initial block into the worklist + auto init_element = std::make_tuple(&main_func->getEntryBlock(), dummy_block); + worklist.push_back(init_element); + nodes[init_element].update_scheduled = true; + nodes[init_element].state = AbstractState {*main_func}; + nodes[init_element].func_entry = main_func; + dbgs(1) << "\nWorklist initialised with " << worklist.size() << (worklist.size() != 1 ? " entries" : " entry") << ". Starting fixpoint iteration...\n"; + // Check whether we have reached the end of the widening phase for (int iter = 0; !worklist.empty() and iter < iterations_max; ++iter) { - - // Check whether we have reached the end of the widening phase - if (worklist.back() == -1) { + if (worklist.back() == std::make_tuple(dummy_block, dummy_block)) { worklist.pop_back(); phase_narrowing = true; dbgs(1) << "\nStarting narrowing in iteration " << iter << "\n"; // We need to consider all nodes once more. 
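For intuition about the three merge operations this widening/narrowing scheme relies on, an interval domain could implement them roughly as sketched below. This is an illustrative toy with invented names, not the project's SimpleInterval class; it assumes <algorithm> and <cstdint> are available and ignores the bottom element for brevity.

    struct ToyInterval {
        int64_t lo, hi;                       // represents [lo, hi]
        bool merge(Merge_op::Type op, ToyInterval const& o) {
            ToyInterval before = *this;
            switch (op) {
            case Merge_op::UPPER_BOUND:       // smallest interval containing both
                lo = std::min(lo, o.lo); hi = std::max(hi, o.hi); break;
            case Merge_op::WIDEN:             // unstable bounds jump to +-infinity, forcing termination
                if (o.lo < lo) lo = INT64_MIN;
                if (o.hi > hi) hi = INT64_MAX;
                break;
            case Merge_op::NARROW:            // recover precision only where widening lost it
                if (lo == INT64_MIN) lo = o.lo;
                if (hi == INT64_MAX) hi = o.hi;
                break;
            default: break;
            }
            return before.lo != lo || before.hi != hi;   // report whether the state changed
        }
    };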
- for (Node const& i: nodes) { - worklist.push_back(i.id); + for (auto const& i : nodes) { + worklist.push_back(i.first); } --iter; @@ -117,15 +115,18 @@ void executeFixpointAlgorithmWidening(llvm::Module& M) { worklist.pop_back(); node.update_scheduled = false; - dbgs(1) << "\nIteration " << iter << ", considering basic block " << node.bb->getName() << '\n'; + dbgs(1) << "\nIteration " << iter << ", considering basic block " + << _bb_to_str(node.bb) << " with callstring " + << _bb_to_str(node.callstring) << '\n'; AbstractState state_new; // Set to bottom if (node.func_entry) { dbgs(1) << " Merging function parameters, is entry block\n"; - AbstractState state_entry {*node.func_entry}; - state_new.merge(Merge_op::UPPER_BOUND, state_entry); + // if it is the entry node, then its state should be top + state_new.isBottom = false; + state_new.merge(Merge_op::UPPER_BOUND, node.state); } dbgs(1) << " Merge of " << llvm::pred_size(node.bb) @@ -133,25 +134,126 @@ void executeFixpointAlgorithmWidening(llvm::Module& M) { // Collect the predecessors std::vector predecessors; - for (llvm::BasicBlock* bb: llvm::predecessors(node.bb)) { - dbgs(3) << " Merging basic block " << bb->getName() << '\n'; + for (llvm::BasicBlock const* bb: llvm::predecessors(node.bb)) { + dbgs(3) << " Merging basic block " << _bb_to_str(bb) << '\n'; - AbstractState state_branched {nodes[nodeIdMap[bb]].state}; + AbstractState state_branched {nodes[std::make_tuple(bb, node.callstring)].state}; state_branched.branch(*bb, *node.bb); state_new.merge(Merge_op::UPPER_BOUND, state_branched); predecessors.push_back(state_branched); } - dbgs(2) << " Relevant incoming state\n"; state_new.printIncoming(*node.bb, dbgs(2), 4); + dbgs(2) << " Relevant incoming state is:\n"; state_new.printIncoming(*node.bb, dbgs(2), 4); // Apply the basic block dbgs(3) << " Applying basic block\n"; - state_new.apply(*node.bb, predecessors); + + if (state_new.isBottom) { + dbgs(3) << " Basic block is unreachable, everything is bottom\n"; + } else { + for (llvm::Instruction const& inst: *node.bb) { + + // Handles return instructions + if (llvm::dyn_cast(&inst)) { + state_new.applyReturnInst(inst); + } + + // If the result of the instruction is not used, there is no reason to compute + // it. (There are no side-effects in LLVM IR. (I hope.)) + if (inst.use_empty()) { + // Except for call instructions, we still want to get that information + if (not llvm::dyn_cast(&inst)) { + dbgs(3) << " Empty use of instruction, skipping...\n"; + continue; + } + } + + // Handles merging points + if (llvm::PHINode const* phi = llvm::dyn_cast(&inst)) { + + state_new.applyPHINode(*node.bb, predecessors, inst); + + // Handles function calls + } else if (llvm::CallInst const* call = llvm::dyn_cast(&inst)) { + + // Checks if an input parameter for the callee is bottom. If so, + // then skip the calculation of the call instruction for now + if (state_new.checkOperandsForBottom(inst)) continue; + + llvm::Function const* callee_func = call->getCalledFunction(); + + // Checks for functions, such as printf and skips them + if (callee_func->empty()) { + dbgs(3) << " Function " << callee_func->getName() << " is external, skipping...\n"; + continue; + } + + auto callee_element = std::make_tuple(&callee_func->getEntryBlock(), node.bb); + bool changed; + + // Checks whether a node with key [%callee entry block, %caller basic block], + // i.e. an entry block with callstring of caller basic block, exists. 
+ // If not, all nodes with their corrosponding keys are initilized for the callee function. + if (nodes.find(callee_element) == nodes.end()) { + // Check if abstract_state of call.bb is bottom or not + dbgs(3) << " No information regarding function call %" << call->getCalledFunction()->getName() << "\n"; + + // Register basic blocks + for (llvm::BasicBlock const& bb : *callee_func) { + dbgs(4) << " Found basic block " << _bb_to_str(&bb) << '\n'; + + Node callee_node; + callee_node.bb = &bb; + callee_node.callstring = node.bb; + // node.state is default initialised (to bottom) + + nodes[std::make_tuple(&bb, node.bb)] = callee_node; + } + + nodes[callee_element].state = AbstractState{ callee_func, state_new, call }; + nodes[callee_element].func_entry = callee_func; + changed = true; + } else { + AbstractState state_update{ callee_func, state_new, call }; + changed = nodes[callee_element].state.merge(Merge_op::UPPER_BOUND, state_update); + } + + //Getting the last block + llvm::BasicBlock const* end_block = &*std::prev(callee_func->end()); + auto end_element = std::make_tuple(end_block, node.bb); + + state_new.applyCallInst(inst, end_block, nodes[end_element].state); + + // If input parameters have changed, we want to interpret the function once again + // and reevaluate the nodes of possible callers. + if (changed) { + for (std::pair& i : nodes) { + if (std::get<0>(i.first) == node.bb and not i.second.update_scheduled) { + dbgs(3) << " Adding possible caller " << _bb_key_to_str(i.first) << " to worklist\n"; + worklist.push_back(i.first); + i.second.update_scheduled = true; + } + } + + // Checks if the key of the callee functions entry node is already on the worklist, + // this is necessary for recursions. + if (not nodes[callee_element].update_scheduled) { + worklist.push_back(callee_element); + nodes[callee_element].update_scheduled = true; + + dbgs(3) << " Adding callee " << _bb_key_to_str(callee_element) << " to worklist\n"; + } else { + dbgs(3) << " Callee already on worklist, nothing to add...\n"; + } + } + } else { + if (state_new.checkOperandsForBottom(inst)) continue; + state_new.applyDefault(inst); + } + } + } // Merge the state back into the node - dbgs(3) << " Merging with stored state\n"; - - // We need to figure out what operation to apply. Merge_op::Type op; if (not phase_narrowing) { if (node.should_widen and node.change_count >= widen_after) { @@ -163,27 +265,31 @@ void executeFixpointAlgorithmWidening(llvm::Module& M) { op = Merge_op::NARROW; } - // Now do the actual operation + dbgs(3) << " Merging with stored state\n"; bool changed = node.state.merge(op, state_new); - dbgs(2) << " Outgoing state\n"; state_new.printOutgoing(*node.bb, dbgs(2), 4); + dbgs(2) << " Outgoing state is:\n"; state_new.printOutgoing(*node.bb, dbgs(2), 4); // No changes, so no need to do anything else if (not changed) continue; ++node.change_count; - + + dbgs(2) << " Node change count:"; + dbgs(2) << node.change_count << "\n"; + dbgs(2) << " State changed, notifying " << llvm::succ_size(node.bb) << (llvm::succ_size(node.bb) != 1 ? 
" successors\n" : " successor\n"); // Something changed and we will need to update the successors - for (llvm::BasicBlock* succ_bb: llvm::successors(node.bb)) { - Node& succ = nodes[nodeIdMap[succ_bb]]; + for (llvm::BasicBlock const* succ_bb: llvm::successors(node.bb)) { + auto succ_key = std::make_tuple(succ_bb, node.callstring); + Node& succ = nodes[succ_key]; if (not succ.update_scheduled) { - worklist.push_back(succ.id); + worklist.push_back(succ_key); succ.update_scheduled = true; - dbgs(3) << " Adding " << succ_bb->getName() << " to worklist\n"; + dbgs(3) << " Adding " << _bb_key_to_str(succ_key) << " to worklist\n"; } } } @@ -191,13 +297,15 @@ void executeFixpointAlgorithmWidening(llvm::Module& M) { if (!worklist.empty()) { dbgs(0) << "Iteration terminated due to exceeding loop count.\n"; } - + // Output the final result dbgs(0) << "\nFinal result:\n"; - for (Node const& i: nodes) { - dbgs(0) << i.bb->getName() << ":\n"; - i.state.printOutgoing(*i.bb, dbgs(0), 2); + for (std::pair i: nodes) { + dbgs(0) << _bb_key_to_str(i.first) << ":\n"; + i.second.state.printOutgoing(*i.second.bb, dbgs(0), 2); } + } } /* end of namespace pcpo */ + diff --git a/src/general.h b/src/general.h new file mode 100644 index 0000000000000000000000000000000000000000..6fe9a6eef7b60669093ba390d4de76301b602231 --- /dev/null +++ b/src/general.h @@ -0,0 +1,58 @@ +#pragma once +#include "llvm/ADT/Hashing.h" +#include "llvm/IR/CFG.h" + +// C++ unordered_maps don't support tuples as keys, which is why one has to define the hash function for said tuple. +// If weird behaviours are observed, chances are high, that this hash function is not working properly, as we don't know +// if the llvm::hash_combine function works for all cases. So far it did (hopefully in the future as well). +namespace std { + + template <> + struct hash> + { + std::size_t operator()(const std::tuple& k) const + { + llvm::hash_code hash_code = llvm::hash_combine(std::get<0>(k), std::get<1>(k)); + return hash_code; + } + }; + +template +struct hash> { + std::size_t operator()(const std::pair &pair) const { + return llvm::hash_combine(pair.first, pair.second); + } +}; + +} + +namespace pcpo { + + // The definition of the tuple, for ease of writing. First element is the basic block we currently evaluate. + // Second element is the callstring, i.e. the basic block from which the function is called, which the + // first element is part of. (It's complicated to put in words). + // The callstring is normally represented by the caller function, as described in "Compiler Design: Analysis and Transformation", + // but since this code operates on BasicBlocks instead of the whole function, we set the callstring to the BasicBlock, + // from which the function call is performed. + typedef std::tuple bb_key; + + // Helper functions for debug output, pretty much self explanatory. 
+ static std::string _bb_to_str(llvm::BasicBlock const* bb) { + std::string str = "%"; + if (llvm::Function const* f = bb->getParent()) { + str.append(f->getName()); + str.append("."); + } + str.append(bb->getName()); + return str; + } + + static std::string _bb_key_to_str(bb_key key) { + std::string str = "["; + str.append(_bb_to_str(std::get<0>(key))); + str.append(", "); + str.append(_bb_to_str(std::get<1>(key))); + str.append("]"); + return str; + } +} diff --git a/src/global.h b/src/global.h index 7775c16feef8216f5c2e444ef155b71d3af8d472..9ae840315106ad0a218875e296dfd8025309a4b9 100644 --- a/src/global.h +++ b/src/global.h @@ -1,6 +1,7 @@ #pragma once -#include "llvm/Support/raw_ostream.h" +#include +#include namespace pcpo { diff --git a/src/hash_utils.h b/src/hash_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..66dc876a18904b3b7046b6f19d17e7e3f7c85c48 --- /dev/null +++ b/src/hash_utils.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include + +#include "llvm/ADT/Hashing.h" + +namespace std { + +template +struct hash> { + size_t operator()(tuple const& t) const { + size_t seed = 0; + apply([&seed](const auto&... item) {(( seed = llvm::hash_combine(seed, item) ), ...);}, t); + return seed; + } +}; + +template +struct hash> { + size_t operator()(pair const& in) const { + return llvm::hash_value(in); + } +}; + + +template +struct hash> { + size_t operator()(vector const& in) const { + return llvm::hash_combine_range(in.begin(), in.end()); + } +}; + +} + +namespace llvm { + +template +static hash_code hash_value(std::vector const& in) { + return hash_combine_range(in.begin(), in.end()); +} + +} diff --git a/src/linear_equality.cpp b/src/linear_equality.cpp new file mode 100644 index 0000000000000000000000000000000000000000..07ca8b1dae9e4ef893b5cff2ba2d04c2f3667bbf --- /dev/null +++ b/src/linear_equality.cpp @@ -0,0 +1,61 @@ +#include "linear_equality.h" + + +namespace pcpo { + +using namespace llvm; + +LinearEquality::LinearEquality(Value const* y) { + this->y = y; + this->a = 1; + this->x = y; + this->b = 0; +} + +LinearEquality::LinearEquality(Value const* y, int64_t a, Value const* x, int64_t b) { + this->y = y; + this->a = a; + this->x = x; + this->b = b; +} + +LinearEquality::LinearEquality(ConstantInt const* y) { + this->y = y; + this->a = 1; + this->x = nullptr; + this->b = y->getSExtValue(); +} + +raw_ostream& operator<<(raw_ostream& os, LinearEquality a) { + os << "{ "; + if (a.y != nullptr && a.y->hasName()) { + os << a.y->getName() << " = "; + } else if (a.y != nullptr) { + os << a.y << " = "; + } else { + os << " = "; + } + + if (a.x != nullptr) { + if (a.x->hasName()) { + os << a.a << " * " << a.x->getName(); + } else { + os << a.a << " * " << a.x; + } + + if (a.b > 0) { + os << " + " << a.b; + } else if (a.b < 0) { + os << a.b; + } + } else { + os << a.b; + } + + os << " }"; + + return os; +} + +} + diff --git a/src/linear_equality.h b/src/linear_equality.h new file mode 100644 index 0000000000000000000000000000000000000000..a6f24513709b2af965e854d22fc0fa01be6d944d --- /dev/null +++ b/src/linear_equality.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include + +#include "global.h" + +namespace pcpo { + +class LinearEquality { + +public: + LinearEquality() = default; + LinearEquality(LinearEquality const&) = default; + LinearEquality(llvm::Value const* y); + LinearEquality(llvm::Value const* y, int64_t a, llvm::Value const* x, int64_t b); + LinearEquality(llvm::ConstantInt const* y); + // y = a * x + b + llvm::Value const* y; + // APInt would be 
nicer, but our anlysis doesnt care about bit width + int64_t a; + llvm::Value const* x; + int64_t b; + + inline bool operator<(LinearEquality const& rhs) const { + if (y == rhs.y) { + if (a == rhs.a) { + if (x == rhs.x) { + if (b == rhs.b) { + return false; + } else { + return b < rhs.b; + } + } else { + return x < rhs.x; + } + } else { + return a < rhs.a; + } + } else { + return y < rhs.y; + } + }; + + inline bool operator>(LinearEquality const& rhs) const { + return *this < rhs; + }; + + inline bool operator==(LinearEquality const& rhs) const { + return y == rhs.y && a == rhs.a && x == rhs.x && b == rhs.b; + }; + + inline bool operator!=(LinearEquality const& rhs) const { + return !(*this == rhs); + }; + + bool isConstant() const { return x == nullptr; }; + bool isTrivial() const { return x == y; }; +}; + +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, LinearEquality a); + +} diff --git a/src/linear_subspace.cpp b/src/linear_subspace.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d6b844108841a9eb9d6cd044004abc5fb5b183e9 --- /dev/null +++ b/src/linear_subspace.cpp @@ -0,0 +1,400 @@ +#include "linear_subspace.h" +#include "global.h" + +#include "llvm/IR/CFG.h" + +#include + +using namespace llvm; +using std::vector; +using std::unordered_map; +using std::set; + +namespace pcpo { + +// MARK: - Initializers + +LinearSubspace::LinearSubspace(Function const& func) { + index = createVariableIndexMap(func); + basis = {MatrixType(getNumberOfVariables() + 1)}; + isBottom = true; +} + +LinearSubspace::LinearSubspace(Function const* callee_func, LinearSubspace const& state, CallInst const* call) { + assert(callee_func->arg_size() == call->getNumArgOperands()); + index = state.index; + basis = state.basis; + + for (Argument const& arg: callee_func->args()) { + Value* value = call->getArgOperand(arg.getArgNo()); + if (value->getType()->isIntegerTy()) { + if (ConstantInt const* c = dyn_cast(value)) { + affineAssignment(&arg, 1, nullptr, c->getSExtValue()); + } else { + affineAssignment(&arg, 1, value, 0); + } + } + } + isBottom = true; +} + +// MARK: - AbstractState Interface + +void LinearSubspace::applyPHINode(BasicBlock const& bb, vector const& pred_values, Instruction const& phi) { + PHINode const* phiNode = dyn_cast(&phi); + int i = 0; + + for (BasicBlock const* pred_bb: llvm::predecessors(&bb)) { + auto& incoming_value = *phiNode->getIncomingValueForBlock(pred_bb); + auto& incoming_state = pred_values[i]; + // Predecessor states should have been merged before. This is just bookkeeping. 
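+            // Example: for %x = phi i32 [ 0, %if.then ], [ %y, %if.else ] this loop joins
+            // (via UPPER_BOUND) the affine assignments x := 0 for the constant edge and
+            // x := y for the variable edge into the current subspace.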
+ if (llvm::ConstantInt const* c = llvm::dyn_cast(&incoming_value)) { + LinearSubspace acc = *this; + acc.affineAssignment(&phi, 1, nullptr, c->getSExtValue()); + merge(Merge_op::UPPER_BOUND, acc); + } else { + LinearSubspace acc = *this; + for (auto m: incoming_state.basis) { + acc.affineAssignment(&phi, 1, &incoming_value, 0); + } + merge(Merge_op::UPPER_BOUND, acc); + } + i++; + } +} + +void LinearSubspace::applyCallInst(Instruction const& inst, BasicBlock const* end_block, LinearSubspace const& callee_state) { + if (callee_state.isBottom) { + isBottom = true; + } else { + basis = callee_state.basis; + } +} + +void LinearSubspace::applyReturnInst(Instruction const& inst) { + Value const* ret_val = dyn_cast(&inst)->getReturnValue(); + if (ret_val && ret_val->getType()->isIntegerTy()) { + if (ConstantInt const* c = dyn_cast(ret_val)) { + affineAssignment(&inst, 1, nullptr, c->getSExtValue()); + } else { + affineAssignment(&inst, 1, ret_val, 0); + } + } else { + nonDeterminsticAssignment(&inst); + } +} + +void LinearSubspace::applyDefault(Instruction const& inst) { + if (inst.getNumOperands() != 2) return nonDeterminsticAssignment(&inst); + + // We only deal with integer types + IntegerType const* type = dyn_cast(inst.getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + type = dyn_cast(inst.getOperand(0)->getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + type = dyn_cast(inst.getOperand(1)->getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + if (isa(inst.getOperand(0)) || isa(inst.getOperand(1))) { + return nonDeterminsticAssignment(&inst); + } + + switch (inst.getOpcode()) { + case Instruction::Add: + return Add(inst); + case Instruction::Sub: + return Sub(inst); + case Instruction::Mul: + return Mul(inst); + case Instruction::SDiv: + case Instruction::UDiv: + default: + return nonDeterminsticAssignment(&inst); + } +} + +bool LinearSubspace::merge(Merge_op::Type op, LinearSubspace const& other) { + index.insert(other.index.begin(), other.index.end()); + + if (isBottom && other.isBottom) { + basis = other.basis; + return false; + } else if (isBottom && !other.isBottom) { + basis = other.basis; + isBottom = false; + return true; + } else if (!isBottom && other.isBottom) { + return false; + } else if (!isBottom && !other.isBottom) { + switch (op) { + case Merge_op::UPPER_BOUND: return leastUpperBound(other); + default: abort(); + } + } +} + +// MARK: - Lattice Operations + +bool LinearSubspace::leastUpperBound(LinearSubspace const& rhs) { + assert(getNumberOfVariables() == rhs.getNumberOfVariables()); + vector before = basis; + vector> vectors; + vectors.reserve(basis.size() + rhs.basis.size()); + for (MatrixType m: basis) { + vectors.push_back(m.toVector()); + } + + for (MatrixType m: rhs.basis) { + vectors.push_back(m.toVector()); + } + + if (vectors.empty()) { + return false; + } + + MatrixType combined = MatrixType(vectors); + MatrixType result = MatrixType::span(combined, true); + + basis = result.reshapeColumns(getHeight(), getWidth()); + // FIXME: figure out a better way to detect changes + return before != basis; +} + +// MARK: - Assignments + +// xi = a1x1 + ... 
+ anxn + a0 +void LinearSubspace::affineAssignment(Value const* xi, unordered_map relations, T constant) { + MatrixType Wr = MatrixType(getNumberOfVariables() + 1); + Wr.setValue(index.at(xi),index.at(xi), 0); + Wr.setValue(0,index.at(xi), constant); + + for (auto [variable, factor]: relations) { + Wr.setValue(index.at(variable),index.at(xi), factor); + } + + // FIXME: this seems quite inefficient + MatrixType vector = MatrixType(Wr.toVector()); + MatrixType vectorSpan = MatrixType::span(vector, true); + Wr = vectorSpan.reshape(Wr.getHeight(), Wr.getWidth()); + + + if (basis.empty()) { + basis.push_back(Wr); + } + + for (MatrixType& matrix: basis) { + matrix *= Wr; + } +} + +// xi = a * xj + b +void LinearSubspace::affineAssignment(Value const* xi, T a, Value const* xj, T b) { + if (xj == nullptr) { + affineAssignment(xi, {}, b); + } else { + affineAssignment(xi, {{xj,a}}, b); + } +} + +// xi = ? +void LinearSubspace::nonDeterminsticAssignment(Value const* xi) { + return; + if (index.count(xi) == 0) return; + + MatrixType T0 = MatrixType(getNumberOfVariables() + 1); + MatrixType T1 = MatrixType(getNumberOfVariables() + 1); + + T0.setValue(index.at(xi),index.at(xi), 0); + T0.setValue(0,index.at(xi), 0); + + T1.setValue(index.at(xi),index.at(xi), 0); + T1.setValue(0,index.at(xi), 1); + + vector> assignment_vectors; + assignment_vectors.push_back(T0.toVector()); + assignment_vectors.push_back(T1.toVector()); + + MatrixType combined = MatrixType(assignment_vectors); + MatrixType result = MatrixType::span(combined, true); + + vector span = result.reshapeColumns(T0.getHeight(), T0.getWidth()); + + if (basis.empty()) { + basis = span; + } + + for (MatrixType& matrix_state: basis) { + for (MatrixType const& matrix_assignment: span) { + matrix_state *= matrix_assignment; + } + } +} + +// MARK: - Abstract Operators + +void LinearSubspace::Add(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := bj + bk] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return affineAssignment(&inst, 1, nullptr, b1->getSExtValue() + b2->getSExtValue() ); + // [xi := b + xj] + } else if (isa(op1) && isa(op2)) { + auto b = dyn_cast(op1); + return affineAssignment(&inst, 1, op2, b->getSExtValue()); + // [xi := xj + b] + } else if (isa(op2) && isa(op1)) { + auto b = dyn_cast(op2); + return affineAssignment(&inst, 1, op1, b->getSExtValue()); + } else { + return affineAssignment(&inst, {{op1,1},{op2,1}}, 0); + } +} + +void LinearSubspace::Sub(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := bj - bk] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return affineAssignment(&inst, 1, nullptr, b1->getSExtValue() - b2->getSExtValue() ); + // [xi := b - xj] + } else if (isa(op1) && isa(op2)) { + auto b = dyn_cast(op1); + return affineAssignment(&inst, 1, op2, -b->getSExtValue()); + // [xi := xj - b] + } else if (isa(op2) && isa(op1)) { + auto b = dyn_cast(op2); + return affineAssignment(&inst, 1, op1, -b->getSExtValue()); + } else { + return affineAssignment(&inst, {{op1,1},{op2,1}}, 0); + } +} + +void LinearSubspace::Mul(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := aj * ak] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return affineAssignment(&inst, 1, nullptr, b1->getSExtValue() * b2->getSExtValue() ); + // [xi := a * xj] + } else if 
(isa(op1) && isa(op2)) { + auto a = dyn_cast(op1); + return affineAssignment(&inst, a->getSExtValue(), op2, 0); + // [xi := xj * a] + } else if (isa(op2) && isa(op1)) { + auto a = dyn_cast(op2); + return affineAssignment(&inst, a->getSExtValue(), op1, 0); + } else { + return nonDeterminsticAssignment(&inst); + } +} + +// MARK: - Helpers + +unordered_map createVariableIndexMap_impl(Function const& func, int& count, set& visited_funcs) { + unordered_map map; + visited_funcs.insert(&func); + for (BasicBlock const& basic_block: func) { + for (Instruction const& inst: basic_block) { + if (isa(inst.getType()) || isa(&inst)) { + count++; + map[&inst] = count; + } + if (CallInst const* call = dyn_cast(&inst)) { + Function const* callee_func = call->getCalledFunction(); + if (callee_func->empty()) { + continue; + } + if (visited_funcs.count(callee_func) == 0) { + for (Argument const& arg: callee_func->args()) { + if (isa(arg.getType())) { + count++; + map[&arg] = count; + } + } + unordered_map callee_map = createVariableIndexMap_impl(*callee_func, count, visited_funcs); + map.merge(callee_map); + } + } + } + } + return map; +} + +unordered_map LinearSubspace::createVariableIndexMap(Function const& func) { + int count = 0; + set visited_funcs = {}; + return createVariableIndexMap_impl(func, count, visited_funcs); +} + +// MARK: - debug output + +unordered_map reverseMap(unordered_map const& map) { + unordered_map reversed; + for (auto [key, value]: map) { + reversed[value] = key; + } + return reversed; +} + +void LinearSubspace::print() const { + dbgs(3) << *this; +} + +void LinearSubspace::printIncoming(BasicBlock const& bb, raw_ostream& out, int indentation) const { + out << *this; +} + +void LinearSubspace::printOutgoing(BasicBlock const& bb, raw_ostream& out, int indentation) const { + MatrixType nullspace = MatrixType::null(MatrixType(this->basis)); + + auto reversed = reverseMap(index); + for (int i = 1; i <= int(index.size()); i++) { + auto val = reversed.at(i); + if (val->hasName()) { + out << left_justify(val->getName(), 6); + } else { + out << left_justify("<>", 6); + } + } + + out << "\n" << nullspace; +} + +void LinearSubspace::debug_output(Instruction const& inst, MatrixType operands) { + dbgs(3) << *this; +} + + +raw_ostream& operator<<(raw_ostream& os, LinearSubspace const& relation) { + auto reversed = reverseMap(relation.index); + if (relation.basis.empty()) { + return os << "[]\n"; + } + for (auto m: relation.basis) { + os << left_justify("", 8); + for (int i = 1; i <= int(relation.index.size()); i++) { + auto val = reversed.at(i); + if (val->hasName()) { + os << left_justify(val->getName(), 6); + } else { + os << left_justify("<>", 6); + } + } + os << "\n" << m << "\n"; + } + return os; +} + +} diff --git a/src/linear_subspace.h b/src/linear_subspace.h new file mode 100644 index 0000000000000000000000000000000000000000..89a12e81802486d9d12d58eb956513baa76cdc3f --- /dev/null +++ b/src/linear_subspace.h @@ -0,0 +1,78 @@ +#pragma once + +#include + +#include "global.h" +#include "simple_matrix.h" +#include "sparse_matrix.h" + +#include + +namespace pcpo { + +class LinearSubspace { +private: + /// Only valid when `createVariableIndexMap` has been generated. + int getNumberOfVariables() const { return index.size(); }; + std::unordered_map createVariableIndexMap(llvm::Function const& func); +public: + /// Type used for Matrix values. 
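+    /// The assignment coefficients fed in via getSExtValue() are integers, but echelonForm()
+    /// divides rows, which is why a floating-point T is used here; an exact rational type
+    /// could be substituted to avoid the rounding that those divisions may introduce.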
+ using T = double; + using MatrixType = SparseMatrix; + + std::unordered_map index; + std::vector basis; + bool isBottom = true; + int getWidth() { return index.size() + 1; }; + int getHeight() { return index.size() + 1; }; + + LinearSubspace() = default; + LinearSubspace(LinearSubspace const& state) = default; + virtual ~LinearSubspace() = default; + + explicit LinearSubspace(llvm::Function const& func); + /// This constructor is used to initialize the state of a function call, to which parameters are passed. + /// This is the "enter" function as described in "Compiler Design: Analysis and Transformation" + explicit LinearSubspace(llvm::Function const* callee_func, LinearSubspace const& state, llvm::CallInst const* call); + + /// Handles the evaluation of merging points + void applyPHINode(llvm::BasicBlock const& bb, std::vector const& pred_values, llvm::Instruction const& phi); + /// Handles the evaluation of function calls + /// This is the "combine" function as described in "Compiler Design: Analysis and Transformation" + void applyCallInst(llvm::Instruction const& inst, llvm::BasicBlock const* end_block, LinearSubspace const& callee_state); + /// Handles the evaluation of return instructions + void applyReturnInst(llvm::Instruction const& inst); + /// Handles the evaluation of all other instructions + void applyDefault(llvm::Instruction const& inst); + bool merge(Merge_op::Type op, LinearSubspace const& other); + void branch(llvm::BasicBlock const& from, llvm::BasicBlock const& towards) { return; }; + bool leastUpperBound(LinearSubspace const& rhs); + + bool checkOperandsForBottom(llvm::Instruction const& inst) { return false; } + + void printIncoming(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation) const; + void printOutgoing(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation) const; + + // Abstract Assignments + void affineAssignment(llvm::Value const* xi, T a, llvm::Value const* xj, T b); + void affineAssignment(llvm::Value const* xi, std::unordered_map relations, T constant); + void nonDeterminsticAssignment(llvm::Value const* xi); + + virtual void print() const; + +protected: + // Abstract Operators + void Add(llvm::Instruction const& inst); + void Sub(llvm::Instruction const& inst); + void Mul(llvm::Instruction const& inst); + + /// Used for debug output + void debug_output(llvm::Instruction const& inst, MatrixType operands); + + MatrixType createTransformationMatrix(llvm::Instruction const& inst); +}; + +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, LinearSubspace const& relation); + + +} diff --git a/src/normalized_conjunction.cpp b/src/normalized_conjunction.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7c2b8d2b3b8f5cbd7a4cc4a59b91cce585fbdc06 --- /dev/null +++ b/src/normalized_conjunction.cpp @@ -0,0 +1,662 @@ +// +// normalized_conjunction.cpp +// ADTTests +// +// Created by Tim Gymnich on 17.1.20. 
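+//
+//  Implements a conjunction-of-linear-equalities domain: each variable y is described by an
+//  equation y = a*x + b over a representative variable x (see linear_equality.h).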
+// + +#include "normalized_conjunction.h" + +#include "llvm/IR/CFG.h" + +using namespace llvm; + +namespace pcpo { + +// MARK: - Initializers + +NormalizedConjunction::NormalizedConjunction(Function const& f) { + for (Argument const& arg: f.args()) { + get(&arg) = LinearEquality(&arg); + validVariables.insert(&arg); + } + isBottom = f.arg_empty(); +} + +NormalizedConjunction::NormalizedConjunction(Function const* callee_func, NormalizedConjunction const& state, CallInst const* call) { + assert(callee_func->arg_size() == call->getNumArgOperands()); + for (Argument const& arg: callee_func->args()) { + Value* value = call->getArgOperand(arg.getArgNo()); + if (value->getType()->isIntegerTy()) { + if (ConstantInt const* c = dyn_cast(value)) { + get(&arg) = { &arg, 1 , nullptr, c->getSExtValue() }; + } else if (state.values.count(value) > 0) { + LinearEquality value_equality = state.values.at(value); + LinearEquality eq = { &arg, value_equality.a , value_equality.x, value_equality.b }; + get(&arg) = { &arg, value_equality.a , value_equality.x, value_equality.b }; + } else { + get(&arg) = { &arg, 1 , value, 0 }; + } + validVariables.insert(&arg); + } + } + isBottom = false; +} + +NormalizedConjunction::NormalizedConjunction(std::unordered_map const& equalaties) { + this->values = equalaties; + isBottom = equalaties.empty(); + for (auto& [key, value]: equalaties) { + validVariables.insert(key); + } +} + + +// MARK: - AbstractState Interface + +/// Handles the evaluation of merging points +void NormalizedConjunction::applyPHINode(BasicBlock const& bb, std::vector pred_values, + Instruction const& phi) { + PHINode const* phiNode = dyn_cast(&phi); + + int i = 0; + + for (BasicBlock const* pred_bb: llvm::predecessors(&bb)) { + auto& incoming_value = *phiNode->getIncomingValueForBlock(pred_bb); + auto& incoming_state = pred_values[i]; + + if (ConstantInt const* c = dyn_cast(&incoming_value)) { + NormalizedConjunction acc = *this; + acc.linearAssignment(&phi, 1, nullptr, c->getSExtValue()); + merge(Merge_op::UPPER_BOUND, acc); + } else if (incoming_state.values.count(&incoming_value) != 0) { + NormalizedConjunction acc = *this; + LinearEquality pred_value = incoming_state[&incoming_value]; + acc.linearAssignment(&phi, pred_value.a, pred_value.x, pred_value.b); + merge(Merge_op::UPPER_BOUND, acc); + } else { + NormalizedConjunction acc = *this; + acc.nonDeterminsticAssignment(&phi); + merge(Merge_op::UPPER_BOUND, acc); + } + i++; + } +} + +void NormalizedConjunction::applyCallInst(Instruction const& inst, BasicBlock const* end_block, NormalizedConjunction const& callee_state) { + std::vector operands; + + //iterate through all instructions of it till we find a return statement + for (auto& iter_inst: *end_block) { + if (ReturnInst const* ret_inst = dyn_cast(&iter_inst)) { + Value const* ret_val = ret_inst->getReturnValue(); + dbgs(4) << "\t\tFound return instruction\n"; + if (callee_state.values.find(ret_val) != callee_state.values.end()) { + dbgs(4) << "\t\tReturn evaluated, merging parameters\n"; + LinearEquality retEq = callee_state.values.at(ret_val); + get(&inst) = {&inst, retEq.a, retEq.x, retEq.b}; + validVariables.insert(&inst); + } else { + dbgs(4) << "\t\tReturn not evaluated, setting to bottom\n"; + } + } + } +} + +void NormalizedConjunction::applyReturnInst(Instruction const& inst) { + Value const* ret_val = dyn_cast(&inst)->getReturnValue(); + if (ret_val && ret_val->getType()->isIntegerTy()) { + if (ConstantInt const* c = dyn_cast(ret_val)) { + get(&inst) = LinearEquality(c); + } else if 
(values.find(ret_val) != values.end()) { + LinearEquality eq = values.at(ret_val); + get(&inst) = {&inst, eq.a, eq.x, eq.b}; + } + } + validVariables.insert(&inst); +} + +void NormalizedConjunction::applyDefault(Instruction const& inst) { + std::vector operands; + + if (inst.getNumOperands() != 2) return nonDeterminsticAssignment(&inst); + + // We only deal with integer types + IntegerType const* type = dyn_cast(inst.getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + type = dyn_cast(inst.getOperand(0)->getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + type = dyn_cast(inst.getOperand(1)->getType()); + if (not type) return nonDeterminsticAssignment(&inst); + + if (isa(inst.getOperand(0)) || isa(inst.getOperand(1))) { + return nonDeterminsticAssignment(&inst); + } + + for (Value const* value: inst.operand_values()) { + operands.push_back(LinearEquality(value)); + } + + switch (inst.getOpcode()) { + case Instruction::Add: + return Add(inst); + case Instruction::Sub: + return Sub(inst); + case Instruction::Mul: + return Mul(inst); + case Instruction::SDiv: + case Instruction::UDiv: + default: + return nonDeterminsticAssignment(&inst); + } + + debug_output(inst, operands); +} + +bool NormalizedConjunction::merge(Merge_op::Type op, NormalizedConjunction const& other) { + if (other.isBottom) { + return false; + } else if (isBottom) { + values = other.values; + validVariables = other.validVariables; + isBottom = false; + return true; + } + + switch (op) { + case Merge_op::UPPER_BOUND: return leastUpperBound(other); + default: abort(); + } +} + +// MARK: - Lattice Operations + +bool NormalizedConjunction::leastUpperBound(NormalizedConjunction rhs) { + // set of all occuring variables in E1 and E2 + std::set vars, varsE1, varsE2; + std::set E1, E2; + + auto mapToSeccond = [](std::pair p){ return p.second; }; + transform(values, std::inserter(E1, E1.end()), mapToSeccond); + transform(rhs.values, std::inserter(E2, E2.end()), mapToSeccond); + + auto mapToY = [](LinearEquality eq){ return eq.y; }; + transform(E1, std::inserter(varsE1, varsE1.end()), mapToY); + transform(E2, std::inserter(varsE2, varsE2.end()), mapToY); + std::set_union(varsE1.begin(), varsE1.end(), varsE2.begin(), varsE2.end(), std::inserter(vars, vars.end())); + + std::set dX1, dX2; + + std::set_difference(vars.begin(), vars.end(), varsE1.begin(), varsE1.end(), std::inserter(dX1, dX1.end())); + std::set_difference(vars.begin(), vars.end(), varsE2.begin(), varsE2.end(), std::inserter(dX2, dX2.end())); + + // extend E1 by trivial equalities + for (auto d: dX1) { + if (validVariables.count(d) > 0) { + LinearEquality eq = {d, 1, d, 0}; + E1.insert(eq); + } else { + E1.insert(rhs[d]); + } + } + + // extend E2 by trivial equalities + for (auto d: dX2) { + if (rhs.validVariables.count(d) > 0) { + LinearEquality eq = {d, 1, d, 0}; + E2.insert(eq); + } else { + E2.insert(operator[](d)); + } + } + + std::set X0 = computeX0(E1, E2); + std::set X1 = computeX1(E1, E2); + std::set X2 = computeX2(E1, E2); + std::set X3 = computeX2(E2, E1); + std::set X4 = computeX4(E1, E2); + + // E1 U E2 = E'0 AND E'1 AND E'2 AND E'3 AND E'4 + + std::set leastUpperBound; + leastUpperBound.insert(X0.begin(), X0.end()); + leastUpperBound.insert(X1.begin(), X1.end()); + leastUpperBound.insert(X2.begin(), X2.end()); + leastUpperBound.insert(X3.begin(), X3.end()); + leastUpperBound.insert(X4.begin(), X4.end()); + + std::unordered_map result; + + auto addMapping = [](LinearEquality eq){ return std::make_pair(eq.y,eq); }; + 
transform(leastUpperBound, std::inserter(result, result.end()), addMapping); + + bool changed = values != result; + + values = result; + validVariables.insert(rhs.validVariables.begin(), rhs.validVariables.end()); + + return changed; +} + +// MARK: Helpers + +/// XO / E'0: set of variables where the right hand side in E1 and E2 coincide +std::set NormalizedConjunction::computeX0(std::set const& E1, std::set const& E2) { + std::set X0; + std::set_intersection(E1.begin(), E1.end(), E2.begin(), E2.end(), std::inserter(X0, X0.end())); + // Remove trivial equalities + std::set filteredX0; + auto filterTrivialEqualaties = [](LinearEquality eq){ return eq.y != eq.x;}; + copy_if(X0, std::inserter(filteredX0, filteredX0.end()), filterTrivialEqualaties); + + return filteredX0; +} + +/// X1 / E'1: set of variables where the right hand side is constant but does not coincide in E1 and E2 +std::set NormalizedConjunction::computeX1(std::set const& E1, std::set const& E2) { + std::set X1; + std::set> differentConstants; + + assert(E1.size() == E2.size() && "E1 and E2 should have the same set of variables in the same order"); + + for (auto itE1 = E1.begin(), itE2 = E2.begin(); itE1 != E1.end() && itE2 != E2.end(); ++itE1, ++itE2) { + auto eq1 = *itE1; + auto eq2 = *itE2; + assert(eq1.y == eq2.y && "left hand side of equations should be the same"); + if (eq1.isConstant() && eq2.isConstant() && eq1.b != eq2.b) { + differentConstants.insert({eq1,eq2}); + } + } + + if (!differentConstants.empty()) { + // pop first element + std::pair h = *differentConstants.begin(); + differentConstants.erase(differentConstants.begin()); + + for (auto i: differentConstants) { + // find a linear equation that contains both points P1(c(1)i, c(1)h) and P2(c(2)i, c(2)h) + // y = a * x + b + auto y = i.first.y; + int64_t a = ((i.second.b - i.first.b)) / (h.second.b - h.first.b); + auto x = h.first.y; + int64_t b = -a * h.first.b + i.first.b; + LinearEquality eq = {y, a, x, b}; + X1.insert(eq); + } + } + return X1; +} + +/// X2 / E'2: set of variables where the right hand side of E1 is constant but the rhs of E2 contains a variable. 
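+/// Illustrative example: with E1 = {x2 = 4, x3 = 5} and E2 = {x2 = x1 + 1, x3 = x1 + 2},
+/// both pairs share the representative x1 and the same constant offset (4 - 1 = 5 - 2 = 3),
+/// so they fall into one equivalence class and X2 keeps the relation x3 = x2 + 1
+/// (up to the choice of representative).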
+std::set NormalizedConjunction::computeX2(std::set const& E1, std::set const& E2) { + std::set X2; + std::set> differentConstants; + + assert(E1.size() == E2.size() && "E1 and E2 should have the same set of variables in the same order"); + + for (auto itE1 = E1.begin(), itE2 = E2.begin(); itE1 != E1.end() && itE2 != E2.end(); ++itE1, ++itE2) { + auto eq1 = *itE1; + auto eq2 = *itE2; + assert(eq1.y == eq2.y && "left hand side of equations should be the same"); + if (eq1.isConstant() && !eq2.isConstant()) { + differentConstants.insert({eq1,eq2}); + } + } + + std::vector>> Pi2; + + for (auto jt = differentConstants.begin(); jt != differentConstants.end();) { + std::set> equivalenceClass; + auto j = *jt; + equivalenceClass.insert(j); + jt = differentConstants.erase(jt); + + // partition differentConstants + for (auto it = jt; it != differentConstants.end(); ) { + auto i = *it; + bool condition1 = i.second.x == j.second.x; + bool condition2 = (i.first.b - i.second.b) / (i.second.a) == (j.first.b - j.second.b) / (j.second.a); + if (condition1 && condition2) { + equivalenceClass.insert(i); + it = differentConstants.erase(it); + jt = differentConstants.begin(); + } else { + it++; + } + } + + Pi2.push_back(equivalenceClass); + } + + // form equaltites for partitions in Pi2 + for (auto q: Pi2) { + auto h = *q.begin(); + q.erase(q.begin()); + for (auto i: q) { + // xi = ai/ah * xh + ( bi - (ai * bh) / ah) + auto y = i.first.y; + auto a = i.second.a / h.second.a; + auto x = h.first.y; + auto b = i.second.b - (i.second.a * h.second.b) / h.second.a; + LinearEquality eq = {y, a, x, b}; + X2.insert(eq); + } + } + return X2; +} + +std::set NormalizedConjunction::computeX4(std::set const& E1, std::set const& E2) { + std::set X4; + std::set> differentConstants; + + assert(E1.size() == E2.size() && "E1 and E2 should have the same set of variables in the same order"); + + for (auto itE1 = E1.begin(), itE2 = E2.begin(); itE1 != E1.end() && itE2 != E2.end(); ++itE1, ++itE2) { + auto eq1 = *itE1; + auto eq2 = *itE2; + assert(eq1.y == eq2.y && "left hand side of equations should be the same"); + if (!eq1.isConstant() && !eq2.isConstant() && eq1 != eq2) { + differentConstants.insert({eq1,eq2}); + } + } + + std::vector>> Pi4; + + // partition differentConstants + for (auto it = differentConstants.begin(); it != differentConstants.end();) { + std::set> equivalenceClass; + std::pair i = *it; + equivalenceClass.insert(i); + it = differentConstants.erase(it); + + for (auto jt = it; jt != differentConstants.end(); ) { + std::pair j = *jt; + bool condition1 = i.first.x == j.first.x && i.second.x == j.second.x; + bool condition2 = i.second.a / (i.first.a) == j.second.a / (j.first.a); + bool condition3 = (i.first.b - i.second.b) / (i.first.a) == (j.first.b - j.second.b) / (j.first.a); + if (condition1 && condition2 && condition3) { + equivalenceClass.insert(j); + jt = differentConstants.erase(jt); + it = differentConstants.begin(); + } else { + jt++; + } + } + + Pi4.push_back(equivalenceClass); + } + + // form equaltites for partitions in Pi4 + for (auto q: Pi4) { + auto h = *q.begin(); + q.erase(q.begin()); + for (auto i: q) { + // xi = ai/ah * xh + ( bi - (ai * bh) / ah) + auto y = i.first.y; + auto a = i.second.a / h.second.a; + auto x = h.first.y; + auto b = i.second.b - (i.second.a * h.second.b) / (h.second.a); + LinearEquality eq = {y, a, x, b}; + X4.insert(eq); + } + } + return X4; +} + +// MARK: - Abstract Assignments + +/// [xi := ?] 
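+/// Illustrative example: forgetting x1 in {x2 = 2*x1 + 4, x3 = 2*x1 + 6} should keep the
+/// relative information x3 = x2 + 2, with one of the remaining variables promoted to the
+/// new representative, while every equation that still mentions x1 is dropped.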
+void NormalizedConjunction::nonDeterminsticAssignment(Value const* xi) { + assert(xi != nullptr && "xi cannot be NULL"); + auto xj = get(xi).x; + validVariables.insert(xi); + + if (xi != xj && xj != 0) { + get(xi) = {xi, 1, xi, 0}; + } else { + // find all equations using xi + auto predicate = [&xi](std::pair p){ return p.second.x == xi && p.second.y != xi ;}; + auto it = std::find_if(values.begin(), values.end(), predicate); + if (it != values.end()) { + auto xk = (*it).second; + for (it = std::find_if(++it, values.end(), predicate); + it != values.end(); + it = std::find_if(++it, values.end(), predicate)) { + auto& xl = it->second; + get(xl.y) = {xl.y, 1, xk.y, xl.b - xk.b}; + } + get(xk.y) = {xk.y, 1, xk.y, 0}; + } + get(xi) = {xi, 1, xi, 0}; + } + +} + +/// [xi := a * xj + b] +void NormalizedConjunction::linearAssignment(Value const* xi, int64_t a, Value const* xj, int64_t b) { + assert(xi != nullptr && "xi cannot be NULL"); + + nonDeterminsticAssignment(xi); + validVariables.insert(xi); + + // make sure xj exists + auto xjS = values.find(xj) != values.end() ? get(xj).x : xj; + auto bS = values.find(xj) != values.end() ? get(xj).b : 0; + auto aS = values.find(xj) != values.end() ? get(xj).a : 1; + + if (!(a % aS == 0 && (-bS - b) % aS == 0)) { + // Precison loss due to int division! Abort + return; + } + + if (xi > xjS) { + get(xi) = {xi, aS * a, xjS, a * bS + b}; + return; + } else { + auto pred = [&xjS](std::pair p){ return p.second.x == xjS && p.second.y != xjS; }; + for (auto xk: make_filter_range(values, pred)) { + get(xk.second.y) = {xk.second.y, xk.second.a * a/aS, xi, (-bS - b) / aS + xk.second.b}; + } + get(xjS) = {xjS, a/aS, xi, (-bS - b) / aS}; + } +} + +// MARK: - Abstract Operations + +// [xi := xj + b] +// [xi := xj + xk] +// [xi := bj + bk] +void NormalizedConjunction::Add(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := bj + bk] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return linearAssignment(&inst, 1, nullptr, b1->getSExtValue() + b2->getSExtValue() ); + // [xi := b + xj] + } else if (isa(op1) && isa(op2)) { + auto b = dyn_cast(op1); + return linearAssignment(&inst, 1, op2, b->getSExtValue()); + // [xi := xj + b] + } else if (isa(op2) && isa(op1)) { + auto b = dyn_cast(op2); + return linearAssignment(&inst, 1, op1, b->getSExtValue()); + // [xi := xj + xk] + } else if (isa(op1) && isa(op2)) { + // [xi := bj + xk] + if (get(op1).isConstant()) { + return linearAssignment(&inst, 1, op2, get(op1).b); + // [xi := xj + bk] + } else if (get(op2).isConstant()) { + return linearAssignment(&inst, 1, op1, get(op2).b); + // [xi := xj + xk] + } else { + return nonDeterminsticAssignment(&inst); + } + // [xi := bj + bk] + } else { + assert(false); + return nonDeterminsticAssignment(&inst); + } +} + +/// [xi := xj - x] +void NormalizedConjunction::Sub(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := bj - bk] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return linearAssignment(&inst, 1, nullptr, b1->getSExtValue() - b2->getSExtValue() ); + // [xi := b - xj] + } else if (isa(op1) && isa(op2)) { + auto b = dyn_cast(op1); + return linearAssignment(&inst, 1, op2, -b->getSExtValue()); + // [xi := xj - b] + } else if (isa(op2) && isa(op1)) { + auto b = dyn_cast(op2); + return linearAssignment(&inst, 1, op1, -b->getSExtValue()); + // [xi := xj - xk] + } else if (isa(op1) && isa(op2)) { + // 
[xi := bj - xk] + if (get(op1).isConstant()) { + return linearAssignment(&inst, 1, op2, -get(op1).b); + // [xi := xj - bk] + } else if (get(op2).isConstant()) { + return linearAssignment(&inst, 1, op1, -get(op2).b); + // [xi := xj - xk] + } else { + return nonDeterminsticAssignment(&inst); + } + } else { + assert(false); + return nonDeterminsticAssignment(&inst); + } +} + +// [xi := a * xj] +void NormalizedConjunction::Mul(Instruction const& inst) { + auto op1 = inst.getOperand(0); + auto op2 = inst.getOperand(1); + + // [xi := aj * ak] + if (isa(op1) && (isa(op2))) { + auto b1 = dyn_cast(op1); + auto b2 = dyn_cast(op2); + return linearAssignment(&inst, 1, nullptr, b1->getSExtValue() * b2->getSExtValue() ); + // [xi := a * xj] + } else if (isa(op1) && isa(op2)) { + auto a = dyn_cast(op1); + int64_t a_val = a->getSExtValue(); + if (a_val == 0) { + return linearAssignment(&inst, 1, nullptr, 0); + } else { + return linearAssignment(&inst, a_val, op2, 0); + } + // [xi := xj * a] + } else if (isa(op2) && isa(op1)) { + auto a = dyn_cast(op2); + int64_t a_val = a->getSExtValue(); + if (a_val == 0) { + return linearAssignment(&inst, 1, nullptr, 0); + } else { + return linearAssignment(&inst, a_val, op2, 0); + } + // [xi := xj * xk] + } else if (isa(op1) && isa(op2)) { + // [xi := aj * xk] + if (get(op1).isConstant()) { + if (get(op1).b == 0) { + return linearAssignment(&inst, 1, nullptr, 0); + } else { + return linearAssignment(&inst, get(op1).b, op2, 0); + } + // [xi := xj * ak] + } else if (get(op2).isConstant()) { + if (get(op2).b == 0) { + return linearAssignment(&inst, 1, nullptr, 0); + } else { + return linearAssignment(&inst, get(op2).b, op1, 0); + } + // [xi := xj * xk] + } else { + return nonDeterminsticAssignment(&inst); + } + } else { + assert(false); + return nonDeterminsticAssignment(&inst); + } +} + +// MARK: - Operators + +LinearEquality& NormalizedConjunction::operator[](Value const* value) { + return get(value); +} + +LinearEquality& NormalizedConjunction::get(Value const* value) { + if (values.count(value) == 0) { + LinearEquality eq = {value, 1, value, 0}; + values[value] = eq; + } + return values[value]; +} + +// MARK: - Debug + +void NormalizedConjunction::debug_output(Instruction const& inst, std::vector operands) { + dbgs(3).indent(2) << inst << " // " << values.at(&inst) << ", args "; + {int i = 0; + for (Value const* value: inst.operand_values()) { + if (i) dbgs(3) << ", "; + if (value->getName().size()) dbgs(3) << '%' << value->getName() << " = "; + dbgs(3) << operands[i]; + ++i; + }} + dbgs(3) << '\n'; +} + +void NormalizedConjunction::printIncoming(BasicBlock const& bb, raw_ostream& out, int indentation = 0) const { + // @Speed: This is quadratic, could be linear + bool nothing = true; + for (std::pair const& i: values) { + bool read = false; + bool written = false; + for (Instruction const& inst: bb) { + if (&inst == i.first) written = true; + for (Value const* v: inst.operand_values()) { + if (v == i.first) read = true; + } + } + + if (read and not written) { + out.indent(indentation) << '%' << i.first->getName() << " = " << i.second << '\n'; + nothing = false; + } + } + if (nothing) { + out.indent(indentation) << "\n"; + } +} + + +void NormalizedConjunction::printOutgoing(BasicBlock const& bb, raw_ostream& out, int indentation = 0) const { + int nrOfNonTrivialEquations = 0; + for (auto const& i: values) { + if (ReturnInst::classof(i.first)) { + out.indent(indentation) << " = " << i.second << '\n'; + } else { + out.indent(indentation) << '%' << i.first->getName() << " 
= " << i.second << '\n'; + } + nrOfNonTrivialEquations += !i.second.isTrivial(); + } + out.indent(indentation) << nrOfNonTrivialEquations << " non-trivial equations\n"; +} + +} /* end of namespace pcpo */ diff --git a/src/normalized_conjunction.h b/src/normalized_conjunction.h new file mode 100644 index 0000000000000000000000000000000000000000..4d77d9314228ceb45274b37ca13786db87684ed1 --- /dev/null +++ b/src/normalized_conjunction.h @@ -0,0 +1,79 @@ +// +// conjunction.h +// PAIN +// +// Created by Tim Gymnich on 17.1.20. +// + +#pragma once + +#include +#include +#include + +#include + +#include "global.h" +#include "linear_equality.h" + +namespace pcpo { + +class NormalizedConjunction { +public: + std::unordered_map values; + std::set validVariables; + bool isBottom = true; + + NormalizedConjunction() = default; + NormalizedConjunction(NormalizedConjunction const& state) = default; + NormalizedConjunction(std::unordered_map const& equalaties); + + explicit NormalizedConjunction(llvm::Function const& f); + /// This constructor is used to initialize the state of a function call, to which parameters are passed. + /// This is the "enter" function as described in "Compiler Design: Analysis and Transformation" + explicit NormalizedConjunction(llvm::Function const* callee_func, NormalizedConjunction const& state, llvm::CallInst const* call); + + /// Handles the evaluation of merging points + void applyPHINode(llvm::BasicBlock const& bb, std::vector pred_values, llvm::Instruction const& phi); + /// Handles the evaluation of function calls + /// This is the "combine" function as described in "Compiler Design: Analysis and Transformation" + void applyCallInst(llvm::Instruction const& inst, llvm::BasicBlock const* end_block, NormalizedConjunction const& callee_state); + /// Handles the evaluation of return instructions + void applyReturnInst(llvm::Instruction const& inst); + /// Handles the evaluation of all other instructions + void applyDefault(llvm::Instruction const& inst); + bool merge(Merge_op::Type op, NormalizedConjunction const& other); + void branch(llvm::BasicBlock const& from, llvm::BasicBlock const& towards) { return; }; + bool leastUpperBound(NormalizedConjunction rhs); + + bool checkOperandsForBottom(llvm::Instruction const& inst) { return false; } + + void printIncoming(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation) const; + void printOutgoing(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation) const; + + // Abstract Assignments + void linearAssignment(llvm::Value const* xi, int64_t a, llvm::Value const* xj, int64_t b); + void nonDeterminsticAssignment(llvm::Value const* xi); + + // Operators + LinearEquality& operator[](llvm::Value const*); + LinearEquality& get(llvm::Value const*); + +protected: + // Abstract Operators + void Add(llvm::Instruction const& inst); + void Sub(llvm::Instruction const& inst); + void Mul(llvm::Instruction const& inst); + + /// Used for debug output + void debug_output(llvm::Instruction const& inst, std::vector operands); + + // Helpers + static std::set computeX0(std::set const& E1, std::set const& E2); + static std::set computeX1(std::set const& E1, std::set const& E2); + static std::set computeX2(std::set const& E1, std::set const& E2); + static std::set computeX4(std::set const& E1, std::set const& E2); +}; + + +} diff --git a/src/simple_interval.cpp b/src/simple_interval.cpp index b43ec0f452899673a5c164d943c409e5f3c4db3a..2e60e1142f148d69fdd4386db2c2eac9597c4831 100644 --- a/src/simple_interval.cpp +++ 
b/src/simple_interval.cpp @@ -3,25 +3,27 @@ namespace pcpo { SimpleInterval::SimpleInterval(llvm::Constant const& constant) { - if (llvm::ConstantInt const* c = llvm::dyn_cast(&constant)) { - state = NORMAL; - begin = c->getValue(); - end = c->getValue(); - return; - } - // Depending on how you want to handle undef values, you might want to consider them as any - // value (e.g. 0). - // if (llvm::UndefValue const* c = llvm::dyn_cast(&constant)) { - // if (llvm::IntegerType const* ty = llvm::dyn_cast(c->getType())) { - // state = NORMAL; - // begin = APInt::getNullValue(ty->getBitWidth()); - // end = begin; - // } - // state = TOP; - // return; - // } + if (llvm::ConstantInt const* c = llvm::dyn_cast(&constant)) { + state = NORMAL; + begin = c->getValue(); + end = c->getValue(); + return; + } + + + // Depending on how you want to handle undef values, you might want to consider them as any + // value (e.g. 0). + // if (llvm::UndefValue const* c = llvm::dyn_cast(&constant)) { + // if (llvm::IntegerType const* ty = llvm::dyn_cast(c->getType())) { + // state = NORMAL; + // begin = APInt::getNullValue(ty->getBitWidth()); + // end = begin; + // } + // state = TOP; + // return; + // } + state = TOP; - state = TOP; } SimpleInterval::SimpleInterval(APInt _begin, APInt _end) { @@ -31,7 +33,6 @@ SimpleInterval::SimpleInterval(APInt _begin, APInt _end) { end = _end; } - // Internal helper functions using APInt = llvm::APInt; @@ -107,6 +108,12 @@ SimpleInterval SimpleInterval::interpret( // We only deal with integer types llvm::IntegerType const* type = llvm::dyn_cast(inst.getType()); if (not type) return SimpleInterval {true}; + + type = llvm::dyn_cast(inst.getOperand(0)->getType()); + if (not type) return SimpleInterval {true}; + + type = llvm::dyn_cast(inst.getOperand(1)->getType()); + if (not type) return SimpleInterval {true}; unsigned bitWidth = inst.getOperand(0)->getType()->getIntegerBitWidth(); assert(bitWidth == inst.getOperand(1)->getType()->getIntegerBitWidth()); @@ -225,9 +232,6 @@ SimpleInterval SimpleInterval::merge(Merge_op::Type op, SimpleInterval a, Simple case Merge_op::UPPER_BOUND: return a._upperBound(b)._makeTopSpecial(); case Merge_op::WIDEN: return a._widen (b)._makeTopSpecial(); case Merge_op::NARROW: return a._narrow (b)._makeTopSpecial(); - default: - assert(false /* invalid op value */); - return SimpleInterval {true}; } } diff --git a/src/simple_interval.h b/src/simple_interval.h index 0a622bc29c4586ac7dd9fd7a549735f86d4e9180..703926304b3f3af23e2010983c9c9f9a86613fb9 100644 --- a/src/simple_interval.h +++ b/src/simple_interval.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include "global.h" diff --git a/src/simple_matrix.h b/src/simple_matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..a1a9188cb3da2313b1deba07dae09fce41235fc9 --- /dev/null +++ b/src/simple_matrix.h @@ -0,0 +1,491 @@ +#pragma once + +#include "global.h" + +#include +#include +#include + +namespace pcpo { + +/// Matrix. 
Row and column are indexed beginning at 0 +template class Matrix { +protected: + std::vector> vectors; + int width; + int height; + +public: + //MARK: - Initializers + /// Creates a matrix with dimensions height x width and initalizes its values to `value` + /// @param width Width of the matrix + /// @param height Height of the matrix + /// @param value Initial value for each element + Matrix(int height, int width, T value) { + this->width = width; + this->height = height; + this->vectors.reserve(width); + for (int i = 0; i < height; i++) { + std::vector vector(width,value); + vectors.push_back(vector); + } + }; + + /// Creates a matrix with dimensions height x width and initalizes its values to 0 + /// @param width Width of the matrix + /// @param height Height of the matrix + Matrix(int height, int width) : Matrix(height, width, 0) {}; + + Matrix() = default; + Matrix(Matrix const& matrix) = default; + virtual ~Matrix() = default; + + + /// Creates an identity matrix with dimension eye x eye + /// @param eye dimension of the matrix + Matrix(int eye){ + this->width = eye; + this->height = eye; + this->vectors.reserve(width); + for (int i = 0; i < height; i++) { + std::vector vector(width,0); + vector[i] = 1; + vectors.push_back(vector); + } + }; + + /// Creates a matrix from a 2D vector + /// @param vectors 2D vector containing columns with rows + Matrix(std::vector> const &vectors) { + assert(all_of(vectors.begin(), vectors.end(), [&vectors](std::vector vec){ return vec.size() == vectors.front().size(); })); + this->width = vectors.empty() ? 0 : vectors.front().size(); + this->height = vectors.size(); + this->vectors = vectors; + }; + + /// Creates a column vector from a std::vector + /// @param vector the vector + Matrix(std::vector const &vector) { + std::vector> vectors = {vector}; + this->width = vector.size(); + this->height = vector.empty() ? 0 : 1; + this->vectors = vectors; + }; + + Matrix(std::vector const &values, int rows, int columns) { + assert(int(values.size()) == rows * columns); + vectors.reserve(rows); + for (int row = 0; row < rows; row++) { + std::vector rowVector; + rowVector.reserve(columns); + for (int column = 0; column < columns; column++) { + rowVector.push_back(values[row * rows + column]); + } + vectors.push_back(rowVector); + } + this->width = columns; + this->height = rows; + }; + + Matrix(std::vector const &vec) { + assert(all_of(vec.begin(), vec.end(), [&vec](Matrix m){ return m.getWidth() == vec.front().getWidth(); })); + this->height = 0; + int size = std::accumulate(vec.begin(), vec.end(), 0, [](int acc, Matrix m){ return acc + m.getHeight(); }); + vectors.reserve(size); + for (auto const &m: vec) { + vectors.insert(vectors.end(), m.vectors.begin(), m.vectors.end()); + this->height += m.height; + } + this->width = vec.empty() ? 
0 : vec.front().width; + } + + // MARK: - Properties + + /// The height of the matrix (number of rows) + int getHeight() const { return height; }; + /// The width of the matrix (number of columns) + int getWidth() const { return width; }; + /// Returns true when the matrix is empty + bool empty() const { return getWidth() == 0 && getHeight() == 0; }; + + // MARK: - Matrix operations + + /// Transposes the matrix + Matrix transpose() const { + Matrix result = Matrix(width, height); + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + result.value(j,i) = value(i,j); + } + } + return result; + }; + + /// Transposes the matrix in place + void transposed() { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + value(j,i) = value(i,j); + } + } + } + + /// Transforms the matrix to reduced row echelon form + Matrix echelonForm() const { + Matrix result = Matrix(*this); + int pivot = 0; + for (int row = 0; row < height; row++) { + if (pivot >= width) { + return result; + } + int i = row; + while (result.value(i,pivot) == 0) { + if (++i >= height) { + i = row; + if (++pivot >= width) { + return result; + } + } + } + result.swap_rows(i, row); + result.divide_row(row, result.value(row,pivot)); + for (i = 0; i < height; i++) { + if (i != row) { + result.add_multiple_row(i, row, -result.value(i,pivot)); + } + } + } + return result; + }; + + /// The rank of the matrix + int getRank() const { + Matrix e = echelonForm(); + int rank = 0; + for (int row = 0; row < height; row++) { + for (int column = 0; column < width; column++) { + if ((e.value(row,column) == 0) && (column == width - 1)) { + return rank; + } else if (e.value(row,column) != 0) { + break; + } + } + rank++; + } + return rank; + } + + /// Basis of the linear span of the column vectors + static Matrix span(Matrix const& matrix, bool transposed = false) { + std::vector> rows; + // if matrix is already transposed don't do it again + Matrix t = transposed ? 
matrix : matrix.transpose(); + Matrix te = t.echelonForm(); + int rank = te.getRank(); + rows.reserve(rank); + for (int row = 0; row < rank; row++) { + rows.push_back(te.row(row)); + } + return Matrix(rows).transpose(); + } + + /// Computes the null space for the column vectors + static Matrix null(Matrix const& matrix) { + auto rref = matrix.echelonForm(); + std::vector nonPivotColumns; + nonPivotColumns.reserve(matrix.getWidth()); + int offset = 0; + + for (int row = 0; row < rref.getWidth(); row++) { + for (int column = offset; column < rref.getWidth(); column++) { + if ((row < rref.getHeight() && rref(row,column) == 0) || row >= rref.getHeight()) { + std::vector vector(rref.getWidth()); + vector[column] = -1; + rref.vectors.insert(rref.vectors.begin()+row, vector); + nonPivotColumns.push_back(column); + rref.height += 1; + offset += 1; + row += 1; + if (row > rref.getHeight()) { + break; + } + } else if (row < rref.getHeight() && rref(row,column) == 1) { + offset += 1; + break; + } + } + } + + rref.height = rref.getWidth(); + rref.vectors.erase(rref.vectors.begin() + rref.getWidth(), rref.vectors.end()); + + std::vector> columns; + + // pick columns for result + for (auto column: nonPivotColumns) { + columns.push_back(rref.column(column)); + } + + return Matrix(columns).transpose(); + } + + /// Converts the matrix to a 1D Vector by stacking the column vectors + std::vector toVector() const { + std::vector result; + result.reserve(getWidth() * getHeight()); + for (int column = 0; column < getWidth(); column++) { + for (int row = 0; row < getHeight(); row++) { + result.push_back(value(row,column)); + } + } + return result; + } + + /// Converts a 1D Vector to a Matrix with given dimensions + /// @param rows number of rows + /// @param columns number of columns + Matrix reshape(int rows, int columns) const { + assert(rows > 0 && columns > 0); + Matrix t = transpose(); + return Matrix(t.vectors.front(), rows, columns).transpose(); + }; + + std::vector> reshapeColumns(int height, int width) const { + std::vector> result; + for (int c = 0; c < getWidth(); c++) { + result.push_back(Matrix(column(c), height, width).transpose()); + } + return result; + } + + /// Returns the value at row i and column j + /// @param row + /// @param column + T& value(int row, int column) { + assert(row < getHeight() && column < getWidth()); + return vectors[row][column]; + }; + + /// Returns the value at row i and column j + /// @param row + /// @param column + T const& value(int row, int column) const { + assert(row < getHeight() && column < getWidth()); + return vectors[row][column]; + }; + + /// Sets the value at row i and column j + /// @param row + /// @param column + /// @param value + void setValue(int row, int column, T val) { + value(row,column) = val; + } + + /// Returns a vector with the elements of the row at index i. The returned row can be modified. + /// @param i Index of the row to return. + std::vector& row(int i) { + assert(i < getHeight()); + return vectors[i]; + }; + + std::vector row(int i) const { + assert(i < getHeight()); + return vectors[i]; + }; + + /// Returns the column at index i. The returned column cannot be modified. 
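+    /// Illustrative example: for the 2x2 matrix [[1, 2], [3, 4]], column(1) yields {2, 4}.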
+ /// @param i Index of the column to return + std::vector column(int i) const { + assert(i < getWidth()); + std::vector row; + row.reserve(width); + for (std::vector const& x : vectors) { + row.push_back(x[i]); + } + return row; + } + + void setColumn(std::vector const& vector, int column) { + assert(int(vector.size()) == height); + for (int row = 0; row < height; row++) { + value(row,column) = vector[row]; + } + } + + // MARK: - Operators + + T& operator()(int i, int j) { return value(i,j); }; + + Matrix& operator *=(Matrix const& rhs) { + assert(width == rhs.height); + Matrix result = Matrix(height,rhs.width); + for (int i = 0; i < height; i++) { + for (int k = 0; k < width; k++) { + for (int j = 0; j < rhs.width; j++) { + result.value(i,j) += value(i,k) * rhs.value(k,j); + } + } + } + this->vectors = result.vectors; + this->width = result.width; + this->height = result.height; + return *this; + }; + + Matrix& operator *=(T scalar) { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + value(i,j) *= scalar; + } + } + return *this; + }; + + + Matrix& operator +=(Matrix const& rhs) { + assert(rhs.width == width && rhs.height == height); + for (int i=0;i& operator +=(T scalar) { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + value(i,j) += scalar; + } + } + return *this; + }; + + Matrix& operator -=(Matrix const& rhs) { + assert(rhs.width == width && rhs.height == height); + for (int i = 0; i < height; i++) { + for (int j= 0; j < width; j++) { + value(i,j) -= rhs.value(i,j); + } + } + return *this; + }; + + Matrix& operator -=(int scalar) { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + value(i,j) -= scalar; + } + } + return *this; + }; + + bool operator==(Matrix const& rhs) const { + return vectors == rhs.vectors && width == rhs.width && height == rhs.height; + }; + + virtual void print() const { + dbgs(4) << *this; + } + +protected: + + // MARK: - Echelon helpers + + /// Swaps two rows + /// @param a index of the first row + /// @param b index of the second row + void swap_rows(int a, int b) { vectors[a].swap(vectors[b]); }; + + /// Divides a row by a constant + /// @param row index of the row to divide + /// @param quotient quotient to divide the row by + void divide_row(int row, T quotient) { + for (int column = 0; column < width; column++) { + value(row,column) /= quotient; + } + }; + + /// Adds a multiple of row b to a row a + /// @param a Row to add a multiple of b to + /// @param b Row to be added to row a + /// @param factor Factor to multiply row b with when adding it to row a + void add_multiple_row(int a, int b, T factor) { + for (int column = 0; column < width; column++) { + value(a,column) += value(b,column) * factor; + } + }; + + // MARK: - Utils + + /// Greates common divisor. + /// @param lhs + /// @param rhs + static int ggT(int lhs, int rhs) { + int h; + if (lhs == 0) { return abs(rhs); } + if (rhs == 0) { return abs(lhs); } + + do { + h = lhs % rhs; + lhs = rhs; + rhs = h; + } while (rhs != 0); + + return abs(lhs); + }; + + /// Least common multiple. 
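+    /// Illustrative example: kgV(4, 6) == 12, since ggT(4, 6) == 2.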
+ /// @param lhs + /// @param rhs + static int kgV(int lhs, int rhs) { return (lhs * rhs) / ggT(lhs, rhs); }; + + +}; + + +template +inline Matrix operator*(Matrix lhs, Matrix const& rhs) { return lhs *= rhs; }; +template +inline Matrix operator*(Matrix lhs, T scalar) { return lhs *= scalar; }; +template +inline Matrix operator+(Matrix lhs, Matrix const& rhs) { return lhs += rhs; }; +template +inline Matrix operator+(Matrix lhs, T scalar) { return lhs += scalar; }; +template +inline Matrix operator-(Matrix lhs, Matrix const& rhs) { return lhs -= rhs; }; +template +inline Matrix operator-(Matrix lhs, T scalar) { return lhs -= scalar; }; + +template +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, Matrix const& matrix) { + for (int row = 0; row < matrix.getHeight(); row++) { + os << "[ "; + for (int column = 0; column < matrix.getWidth(); column++) { + if constexpr (std::is_floating_point_v) { + if (column == matrix.getWidth() - 1) { + os << llvm::format("%g", matrix.value(row,column)); + } else { + os << llvm::format("%-6g", matrix.value(row,column)); + } + } else { + if (column == matrix.getWidth() - 1) { + os << llvm::format("%d", matrix.value(row,column)); + } else { + os << llvm::format("%-6d", matrix.value(row,column)); + } + } + } + os << " ]\n"; + } + + if (matrix.getWidth() == 0 && matrix.getHeight() == 0) { + os << "[]\n"; + } + + return os; +}; + +} + diff --git a/src/sparse_matrix.h b/src/sparse_matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..f05008878327a471f3cfd4b73f930209ae76d3b7 --- /dev/null +++ b/src/sparse_matrix.h @@ -0,0 +1,521 @@ +#pragma once + +#include "global.h" +#include "general.h" + +#include +#include +#include +#include + +namespace pcpo { + +/// Matrix. Row and column are indexed beginning at 0 +template class SparseMatrix { +protected: + // (row,column) -> T + std::unordered_map,T> values; + int width; + int height; + bool _transposed = false; + int getEstimatedSize() const { return width * height / 4; }; + +public: + //MARK: - Initializers + /// Creates a matrix with dimensions height x width and initalizes its values to `value` + /// @param width Width of the matrix + /// @param height Height of the matrix + /// @param value Initial value for each element + SparseMatrix(int height, int width, T value) { + this->width = width; + this->height = height; + if (value == 0) return; + values.reserve(height*value); + for (int row = 0; row < height; row++) { + for (int column = 0; column < width; column++) { + values[{row,column}] = value; + } + } + } + + /// Creates a matrix with dimensions height x width and initalizes its values to 0 + /// @param width Width of the matrix + /// @param height Height of the matrix + SparseMatrix(int height, int width) : SparseMatrix(height, width, 0) {}; + + SparseMatrix() = default; + SparseMatrix(SparseMatrix const& matrix) = default; + virtual ~SparseMatrix() = default; + + /// Creates an identity matrix with dimension eye x eye + /// @param eye dimension of the matrix + SparseMatrix(int eye) { + this->width = eye; + this->height = eye; + values.reserve(eye); + for (int i = 0; i < eye; i++) { + values[{i,i}] = 1; + } + } + + /// Creates a matrix from a 2D vector + /// @param vectors 2D vector containing columns with rows + SparseMatrix(std::vector> const &vectors) { + assert(all_of(vectors.begin(), vectors.end(), [&vectors](std::vector vec){ return vec.size() == vectors.front().size(); })); + this->width = vectors.empty() ? 
0 : vectors.front().size(); + this->height = vectors.size(); + values.reserve(getEstimatedSize()); + for (int row = 0; row < int(vectors.size()); row++) { + auto rowVector = vectors[row]; + for (auto column = 0; column < int(rowVector.size()); column++) { + T val = rowVector[column]; + if (val != 0) { + values[{row,column}] = val; + } + } + } + } + + SparseMatrix(std::unordered_map,T> const &values, int width, int height) { + this->width = width; + this->height = height; + this->values = values; + } + + /// Creates a column vector from a std::vector + /// @param vector the vector + SparseMatrix(std::vector const &vector) { + this->width = vector.size(); + this->height = vector.empty() ? 0 : 1; + values.reserve(getEstimatedSize()); + for (int column = 0; column < int(vector.size()); column++) { + T val = vector[column]; + if (val != 0) { + values[{0,column}] = val; + } + } + } + + SparseMatrix(std::vector const &vs, int rows, int columns) { + assert(int(vs.size()) == rows * columns); + this->width = columns; + this->height = rows; + values.reserve(getEstimatedSize()); + for (int row = 0; row < rows; row++) { + for (int column = 0; column < columns; column++) { + T val = vs[row * rows + column]; + if (val != 0) { + values[std::pair{row,column}] = val; + } + } + } + } + + SparseMatrix(std::vector const &vec) { + assert(all_of(vec.begin(), vec.end(), [&vec](SparseMatrix m){ return m.getWidth() == vec.front().getWidth(); })); + std::unordered_map,T> val; + this->height = 0; + int estimated_size = std::accumulate(vec.begin(), vec.end(), 0, [](int acc, SparseMatrix m){ return acc + m.getEstimatedSize(); }); + values.reserve(estimated_size); + for (auto m: vec) { + for (auto const [key, value]: m.values) { + if (m._transposed) { + values[{key.second + this->height, key.first}] = value; + } else { + values[{key.first + this->height, key.second}] = value; + } + } + this->height += m.getHeight(); + } + this->width = vec.empty() ? 
0 : vec.front().width; + } + + // MARK: - Properties + + /// The height of the matrix (number of rows) + int getHeight() const { return height; }; + /// The width of the matrix (number of columns) + int getWidth() const { return width; }; + /// Returns true when the matrix is empty + bool empty() const { return getWidth() == 0 && getHeight() == 0; }; + + // MARK: - Matrix operations + + /// Returns the transposed matrix + SparseMatrix transpose() const { + SparseMatrix result = SparseMatrix(*this); + result.transposed(); + return result; + } + + /// Transposes the matrix + void transposed() { + _transposed = !_transposed; + int oldWildth = width; + width = height; + height = oldWildth; + } + + /// Transforms the matrix to reduced row echelon form + SparseMatrix echelonForm() const { + SparseMatrix result = SparseMatrix(*this); + int pivot = 0; + for (int row = 0; row < height; row++) { + if (pivot >= width) { return result; } + int i = row; + while (result.value(i,pivot) == 0) { + if (++i >= height) { + i = row; + if (++pivot >= width) { return result; } + } + } + result.swap_rows(i, row); + result.divide_row(row, result.value(row,pivot)); + for (i = 0; i < height; i++) { + if (i != row) { + result.add_multiple_row(i, row, -result.value(i,pivot)); + } + } + } + return result; + } + + /// The rank of the matrix + int getRank() const { + SparseMatrix e = echelonForm(); + int rank = 0; + for (int row = 0; row < height; row++) { + for (int column = 0; column < width; column++) { + if ((e.value(row,column) == 0) && (column == width - 1)) { + return rank; + } else if (e.value(row,column) != 0) { + break; + } + } + rank++; + } + return rank; + } + + /// Basis of the linear span of the column vectors + static SparseMatrix span(SparseMatrix const& matrix, bool transposed = false) { + std::vector> rows; + SparseMatrix t = transposed ? matrix : matrix.transpose(); + SparseMatrix te = t.echelonForm(); + int rank = te.getRank(); + rows.reserve(rank); + for (int row = 0; row < rank; row++) { + rows.push_back(te.row(row)); + } + return SparseMatrix(rows).transpose(); + } + + /// Computes the null space for the column vectors + static SparseMatrix null(SparseMatrix const& matrix) { + auto rref = matrix.echelonForm(); + + std::unordered_map,T> result; + result.reserve(matrix.getEstimatedSize()); + int index = 0; + int offset = 0; + std::unordered_map index_map; + index_map.reserve(matrix.getWidth()); + + for (int row = 0; row < rref.getWidth(); row++) { + for (int column = offset; column < rref.getWidth(); column++) { + if (row < rref.getHeight() && rref.value(row,column) == 1) { + offset += 1; + break; + } else if ((row < rref.getHeight() && rref.value(row,column) == 0) || row >= rref.getHeight()) { + // insert -1 + result[{column,index}] = -1; + index_map[column] = true; + + // copy everyting above -1. Below is guranteed to be zero due to rref. + int of = 0; + for (int i = 0; i < std::min(row,rref.getHeight()) + of; i++) { + if (index_map.count(i) == 0) { + T value = rref.value(i-of,column); + if (value != 0) { + result[{i,index}] = value; + } + } else { + of += 1; + } + } + + index += 1; + offset += 1; + if (row >= rref.getHeight()) { + break; + } + } + } + } + return SparseMatrix(result,index,result.empty() ? 
0 : rref.getWidth()); + } + + /// Converts the matrix to a 1D Vector by stacking the column vectors + std::vector toVector() const { + std::vector result; + result.reserve(getWidth() * getHeight()); + for (int column = 0; column < getWidth(); column++) { + for (int row = 0; row < getHeight(); row++) { + result.push_back(value(row,column)); + } + } + return result; + } + + /// Converts a 1D Vector to a Matrix with given dimensions + /// @param rows number of rows + /// @param columns number of columns + SparseMatrix reshape(int rows, int columns) const { + assert(rows > 0 && columns > 0); + return SparseMatrix(column(0), rows, columns).transpose(); + } + + std::vector> reshapeColumns(int height, int width) const { + std::vector> result; + result.reserve(getWidth()); + for (int c = 0; c < getWidth(); c++) { + result.push_back(SparseMatrix(column(c), height, width).transpose()); + } + return result; + } + + /// Sets the value at row i and column j + /// @param row + /// @param column + /// @param value + void setValue(int row, int column, T value) { + auto index = _transposed ? std::pair{column,row} : std::pair{row,column}; + if (value != 0) { + values[index] = value; + } else { + values.erase(index); + } + } + + /// Returns the value at row i and column j + /// @param row + /// @param column + T value(int row, int column) const { + assert(row < height && column < width); + auto index = _transposed ? std::pair{column,row} : std::pair{row,column}; + auto it = values.find(index); + + if (it == values.end()) { + return 0; + } + return it->second; + } + + /// Returns a vector with the elements of the row at index i. The returned row cannot be modified. + /// @param i Index of the row to return. + std::vector row(int i) { + assert(i < height); + std::vector result; + result.reserve(width); + for (int column = 0; column < width; column++) { + result.push_back(value(i,column)); + } + return result; + } + + /// Returns the column at index i. The returned column cannot be modified. 
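+    /// Note: the result is a dense std::vector of length height; entries not present in
+    /// the sparse value map are returned as 0.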
+ /// @param i Index of the column to return + std::vector column(int i) const { + assert(i < width); + std::vector result; + result.reserve(height); + for (int row = 0; row < height; row++) { + result.push_back(value(row,i)); + } + return result; + } + + void setColumn(std::vector const& vector, int column) { + assert(int(vector.size()) == height && column < width); + for (int row = 0; row < height; row++) { + value(row,column) = vector[row]; + } + } + + // MARK: - Operators + + T operator()(int i, int j) { return value(i,j); }; + + SparseMatrix& operator *=(SparseMatrix const& rhs) { + assert(width == rhs.height); + SparseMatrix result = SparseMatrix(height,rhs.width); + for (int i = 0; i < height; i++) { + for (int k = 0; k < width; k++) { + for (int j = 0; j < rhs.width; j++) { + T val = result.value(i,j) + value(i,k) * rhs.value(k,j); + result.setValue(i,j, val); + } + } + } + this->values = result.values; + this->width = result.width; + this->height = result.height; + this->_transposed = result._transposed; + return *this; + } + + SparseMatrix& operator *=(T scalar) { + for (T& value : values) { + value *= scalar; + } + return *this; + } + + SparseMatrix& operator +=(SparseMatrix const& rhs) { + assert(rhs.width == width && rhs.height == height); + for (int i=0;i& operator +=(T scalar) { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + setValue(i,j, value(i,j) + scalar); + } + } + return *this; + } + + SparseMatrix& operator -=(SparseMatrix const& rhs) { + assert(rhs.width == width && rhs.height == height); + for (int i = 0; i < height; i++) { + for (int j= 0; j < width; j++) { + setValue(i,j, value(i,j) - rhs.value(i,j)); + } + } + return *this; + } + + SparseMatrix& operator -=(int scalar) { + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + setValue(i,j, value(i,j) - scalar); + } + } + return *this; + } + + bool operator==(SparseMatrix const& rhs) const { + if (rhs._transposed != _transposed && width == rhs.width && height == rhs.height) { + for (int row = 0; row < getHeight(); row++) { + for (int column = 0; column < getWidth(); column++) { + if (value(row, column) != rhs.value(row,column)) { + return false; + } + } + } + return true; + } + return values == rhs.values && width == rhs.width && height == rhs.height; + } + + virtual void print() const { + dbgs(4) << *this; + } + +protected: + + // MARK: - Echelon helpers + + /// Swaps two rows + /// @param a index of the first row + /// @param b index of the second row + void swap_rows(int a, int b) { + for (int column = 0; column < width; column++) { + auto nha = values.extract({a,column}); + auto nhb = values.extract({b,column}); + // check if node handle is not empty + if (!nha.empty()) { + nha.key() = {b,column}; + values.insert(std::move(nha)); + } + if (!nhb.empty()) { + nhb.key() = {a,column}; + values.insert(std::move(nhb)); + } + } + } + + /// Divides a row by a constant + /// @param row index of the row to divide + /// @param quotient quotient to divide the row by + void divide_row(int row, T quotient) { + for (int column = 0; column < width; column++) { + if (values.count({row,column}) != 0) { + setValue(row, column, value(row,column) / quotient); + } + } + } + + /// Adds a multiple of row b to a row a + /// @param a Row to add a multiple of b to + /// @param b Row to be added to row a + /// @param factor Factor to multiply row b with when adding it to row a + void add_multiple_row(int a, int b, T factor) { + for (int column = 0; column < width; column++) { + if 
(values.count({b,column}) != 0) { + setValue(a,column, value(a,column) + value(b,column) * factor); + } + } + } +}; + + +template +inline SparseMatrix operator*(SparseMatrix lhs, SparseMatrix const& rhs) { return lhs *= rhs; }; +template +inline SparseMatrix operator*(SparseMatrix lhs, T scalar) { return lhs *= scalar; }; +template +inline SparseMatrix operator+(SparseMatrix lhs, SparseMatrix const& rhs) { return lhs += rhs; }; +template +inline SparseMatrix operator+(SparseMatrix lhs, T scalar) { return lhs += scalar; }; +template +inline SparseMatrix operator-(SparseMatrix lhs, SparseMatrix const& rhs) { return lhs -= rhs; }; +template +inline SparseMatrix operator-(SparseMatrix lhs, T scalar) { return lhs -= scalar; }; + +template +llvm::raw_ostream& operator<<(llvm::raw_ostream& os, SparseMatrix const& matrix) { + for (int row = 0; row < matrix.getHeight(); row++) { + os << "[ "; + for (int column = 0; column < matrix.getWidth(); column++) { + if constexpr (std::is_floating_point_v) { + if (column == matrix.getWidth() - 1) { + os << llvm::format("%g", matrix.value(row,column)); + } else { + os << llvm::format("%-6g", matrix.value(row,column)); + } + } else { + if (column == matrix.getWidth() - 1) { + os << llvm::format("%d", matrix.value(row,column)); + } else { + os << llvm::format("%-6d", matrix.value(row,column)); + } + } + } + os << " ]\n"; + } + + if (matrix.getWidth() == 0 && matrix.getHeight() == 0) { + os << "[]\n"; + } + + return os; +}; + +} + diff --git a/src/value_set.h b/src/value_set.h index 1d5d04bb41eb892d6a3b8d2e30e5d1667bd81a7c..15d8c641719811be9cc94fe181eb72d4675103ce 100644 --- a/src/value_set.h +++ b/src/value_set.h @@ -3,10 +3,7 @@ #include #include -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Constant.h" #include "llvm/IR/CFG.h" -#include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instructions.h" #include "global.h" @@ -88,64 +85,163 @@ public: isBottom = false; } - void apply(llvm::BasicBlock const& bb, std::vector const& pred_values) { - if (isBottom) { - dbgs(3) << " Basic block is unreachable, everything is bottom\n"; - return; + // This constructor is used to initialize the state of a function call, to which parameters are passed. 
+ // This is the "enter" function as described in "Compiler Design: Analysis and Transformation" + explicit AbstractStateValueSet(llvm::Function const* callee_func, AbstractStateValueSet const& state, + llvm::CallInst const* call) { + assert(callee_func->arg_size() == call->getNumArgOperands()); + for (llvm::Argument const& arg: callee_func->args()) { + llvm::Value* value = call->getArgOperand(arg.getArgNo()); + if (value->getType()->isIntegerTy()) { + if (llvm::Constant const* c = llvm::dyn_cast(value)) { + values[&arg] = AbstractDomain {*c}; + } else { + values[&arg] = state.values.at(value); + } + } else { + values[&arg] = AbstractDomain {true}; + } } + isBottom = false; + } + + // Handles the evaluation of merging points + void applyPHINode(llvm::BasicBlock const& bb, std::vector const& pred_values, + llvm::Instruction const& inst) { + std::vector operands; + AbstractDomain inst_result; + + llvm::PHINode const* phi = llvm::dyn_cast(&inst); + + // Phi nodes are handled here, to get the precise values of the predecessors + for (unsigned i = 0; i < phi->getNumIncomingValues(); ++i) { + // Find the predecessor corresponding to the block of the phi node + unsigned block = 0; + for (llvm::BasicBlock const* pred_bb: llvm::predecessors(&bb)) { + if (pred_bb == phi->getIncomingBlock(i)) break; + ++block; + } + + // Take the union of the values + AbstractDomain pred_value = pred_values[block].getAbstractValue(*phi->getIncomingValue(i)); + inst_result = AbstractDomain::merge(Merge_op::UPPER_BOUND, inst_result, pred_value); + + operands.push_back(pred_value); // Keep the debug output happy + } + + values[&inst] = inst_result; + + debug_output(inst, operands); + } + + // Handles the evaluation of function calls + // This is the "combine" function as described in "Compiler Design: Analysis and Transformation" + void applyCallInst(llvm::Instruction const& inst, llvm::BasicBlock const* end_block, + AbstractStateValueSet const& callee_state) { std::vector operands; - // Go through each instruction of the basic block and apply it to the state - for (llvm::Instruction const& inst: bb) { - // If the result of the instruction is not used, there is no reason to compute - // it. (There are no side-effects in LLVM IR. 
(I hope.)) - if (inst.use_empty()) continue; + // Keep the debug output happy + for (llvm::Value const* value : inst.operand_values()) { + operands.push_back(getAbstractValue(*value)); + } - AbstractDomain inst_result; - - if (llvm::PHINode const* phi = llvm::dyn_cast(&inst)) { - // Phi nodes are handled here, to get the precise values of the predecessors - - for (unsigned i = 0; i < phi->getNumIncomingValues(); ++i) { - // Find the predecessor corresponding to the block of the phi node - unsigned block = 0; - for (llvm::BasicBlock const* pred_bb: llvm::predecessors(&bb)) { - if (pred_bb == phi->getIncomingBlock(i)) break; - ++block; - } - - // Take the union of the values - AbstractDomain pred_value = pred_values[block].getAbstractValue(*phi->getIncomingValue(i)); - inst_result = AbstractDomain::merge(Merge_op::UPPER_BOUND, inst_result, pred_value); - - operands.push_back(pred_value); // Keep the debug output happy - } - } else { - for (llvm::Value const* value: inst.operand_values()) { - operands.push_back(getAbstractValue(*value)); + //iterate through all instructions of it till we find a return statement + for (llvm::Instruction const& iter_inst: *end_block) { + if (llvm::ReturnInst const* ret_inst = llvm::dyn_cast(&iter_inst)) { + llvm::Value const* ret_val = ret_inst->getReturnValue(); + dbgs(4) << " Found return instruction\n"; + + if (callee_state.values.find(ret_val) != callee_state.values.end()) { + dbgs(4) << " Return evaluated, merging parameters\n"; + values[&inst] = callee_state.values.at(ret_val); + } else { + dbgs(4) << " Return not evaluated, setting to bottom\n"; + values[&inst] = AbstractDomain{}; // Initializes the return value to Bottom } + } + } + + debug_output(inst, operands); + } - // Compute the result of the operation - inst_result = AbstractDomain::interpret(inst, operands); + // Handles the evaluation of return instructions + void applyReturnInst(llvm::Instruction const& inst) { + llvm::Value const* ret_val = llvm::dyn_cast(&inst)->getReturnValue(); + if (ret_val && ret_val->getType()->isIntegerTy()) { + if (llvm::Constant const* c = llvm::dyn_cast(ret_val)) { + values[&inst] = AbstractDomain{ *c }; + } else if (values.find(ret_val) != values.end()) { + values[&inst] = values.at(ret_val); + } else { + values[&inst] = AbstractDomain{}; // Initializes the return value to Bottom } - - values[&inst] = inst_result; - - dbgs(3).indent(2) << inst << " // " << values.at(&inst) << ", args "; - {int i = 0; - for (llvm::Value const* value: inst.operand_values()) { - if (i) dbgs(3) << ", "; - if (value->getName().size()) dbgs(3) << '%' << value->getName() << " = "; - dbgs(3) << operands[i]; - ++i; - }} - dbgs(3) << '\n'; + } else { + values[&inst] = AbstractDomain{ true }; + } + } - operands.clear(); + // Handles the evaluation of all other instructions + void applyDefault(llvm::Instruction const& inst) { + std::vector operands; + + for (llvm::Value const* value: inst.operand_values()) { + operands.push_back(getAbstractValue(*value)); } + + // Compute the result of the operation + values[&inst] = AbstractDomain::interpret(inst, operands); + + debug_output(inst, operands); } - + + // Used for debug output + void debug_output(llvm::Instruction const& inst, std::vector operands) { + dbgs(3).indent(2) << inst << " // " << values.at(&inst) << ", args "; + {int i = 0; + for (llvm::Value const* value: inst.operand_values()) { + if (i) dbgs(3) << ", "; + if (value->getName().size()) dbgs(3) << '%' << value->getName() << " = "; + dbgs(3) << operands[i]; + ++i; + }} + dbgs(3) << 
'\n'; + } + + // Checks whether at least one of the operands is bottom -- in such a case we + // set the result to bottom as well + bool checkOperandsForBottom(llvm::Instruction const& inst) { + std::vector operands; + + for (llvm::Value const* value : inst.operand_values()) { + operands.push_back(getAbstractValue(*value)); + } + + for (llvm::Value const* value : inst.operand_values()) + if (llvm::Constant const* c = llvm::dyn_cast(value)) { + + } else + if (values[value].isBottom()) { + values[&inst] = AbstractDomain{}; + + dbgs(3).indent(2) << inst << " // " << values.at(&inst) << ", args "; + {int i = 0; + for (llvm::Value const* value : inst.operand_values()) { + if (i) dbgs(3) << ", "; + if (value->getName().size()) dbgs(3) << '%' << value->getName() << " = "; + dbgs(3) << operands[i]; + ++i; + }} + dbgs(3) << '\n'; + + operands.clear(); + + return true; + } + return false; + } + + bool merge(Merge_op::Type op, AbstractStateValueSet const& other) { bool changed = false; @@ -168,10 +264,10 @@ public: values[i.first] = v; changed = true; - + if (checkValueForBottom(4, i.first)) return changed; } - if (changed) checkForBottom(4); + //if (changed) checkForBottom(4); return changed; } @@ -247,17 +343,24 @@ public: if (values.count(&lhs) && values.count(&rhs)) { dbgs(3) << " Values restricted to %" << lhs.getName() << " = " << values[&lhs] << " and %" << rhs.getName() << " = " << values[&rhs] << '\n'; + if (!checkValueForBottom(6, &lhs)) checkValueForBottom(6, &rhs); } else if (values.count(&lhs)) { dbgs(3) << " Value restricted to %" << lhs.getName() << " = " << values[&lhs] << '\n'; + checkValueForBottom(6, &lhs); } else if (values.count(&rhs)) { dbgs(3) << " Value restricted to %" << rhs.getName() << " = " << values[&rhs] << '\n'; + checkValueForBottom(6, &rhs); } else { dbgs(3) << " No restrictions were derived.\n"; } // This cannot happen when doing UPPER_BOUND or WIDEN, but for NARROW it is possible, so // check just in case. - checkForBottom(6); + + //This was created before interprocedural analysis was added, therefore it might work wrong + //Perhaps, one should check each value for bottom separately, see bool checkValueForBottom() + //if (!checkValueForBottom(6, &lhs)) checkValueForBottom(6, &rhs); + //checkForBottom(4); } void printIncoming(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const { @@ -284,9 +387,15 @@ public: } void printOutgoing(llvm::BasicBlock const& bb, llvm::raw_ostream& out, int indentation = 0) const { for (auto const& i: values) { - out.indent(indentation) << '%' << i.first->getName() << " = " << i.second << '\n'; + if (llvm::ReturnInst::classof(i.first)) { + out.indent(indentation) << " = " << i.second << '\n'; + } else { + out.indent(indentation) << '%' << i.first->getName() << " = " << i.second << '\n'; + } } - if (values.size() == 0) { + if (isBottom) { + out.indent(indentation) << "bottom\n"; + } else if (values.size() == 0) { out.indent(indentation) << "\n"; } }; @@ -308,6 +417,10 @@ public: // If any of our values is bottom, then we are bottom as well. So this function checks that and // normalises our value. Returns whether this changed our value (i.e. we are now bottom). + + // Actually the statement above is correct if we have only one function. If not, then we can + // have values that are bottom but do not belong to the current function => the function + // is not bottom. Use bool checkValueForBottom() if you want to check the values separately. 
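+    // Illustrative note (not from the original patch; "ret" is a hypothetical value): after a
+    // call site has been evaluated, prefer the per-value check over the global one, e.g.
+    //
+    //     values[ret] = callee_state.values.at(ret_val);
+    //     checkValueForBottom(4, ret);   // only collapse the state if *this* value is bottom
+    //
+    // because checkForBottom() below would also react to bottom values that belong to a
+    // different function and would wrongly mark the whole state as bottom.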
bool checkForBottom(int indent = 0) { if (isBottom) return false; @@ -323,6 +436,21 @@ public: } return false; } + + // For a specific value checks whether it is a bottom. See bool checkForBottom for more + // information + bool checkValueForBottom(int indent, llvm::Value const* value) { + if (isBottom) return false; + if (values[value] == AbstractDomain{}) { + dbgs(3).indent(indent) << "Variable %" << value->getName() << " is bottom, so the state is as well.\n"; + + values.clear(); + isBottom = true; + + return true; + } + return false; + } }; } /* end of namespace pcpo */ diff --git a/test/linear_subspace_test.cpp b/test/linear_subspace_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..97a51cd6c5a9f530b9403dafb7d0a470b6deeca8 --- /dev/null +++ b/test/linear_subspace_test.cpp @@ -0,0 +1,103 @@ +#include +#include +#include +#include +#include + +#include "../src/linear_subspace.h" + +using namespace std; +using namespace pcpo; + +class LinearSubspaceTest: LinearSubspace { + +public: + static bool runTestLeastUpperBound1(); + static bool runTestLeastUpperBound2(); +}; + +const llvm::Value *x1 = (llvm::Value *) 1; +const llvm::Value *x2 = (llvm::Value *) 2; +const llvm::Value *x3 = (llvm::Value *) 3; + +const std::unordered_map mock_index = { + {x1, 1}, + {x2, 2}, + {x3, 3}, +}; + +bool LinearSubspaceTest::runTestLeastUpperBound1() { + std::cout << "Testing least upper bound 1: "; + bool result = false; + + LinearSubspace r1 = LinearSubspace(); + r1.isBottom = false; + r1.basis = {MatrixType(4)}; + r1.index = mock_index; + + LinearSubspace r2 = LinearSubspace(); + r2.isBottom = false; + r2.basis = {MatrixType(4)}; + r2.index = mock_index; + + LinearSubspace expected = LinearSubspace(); + expected.basis = {MatrixType(4)}; + + + auto actual = r1.leastUpperBound(r2); + + result = r1.basis == expected.basis && !actual && !r1.isBottom; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool LinearSubspaceTest::runTestLeastUpperBound2() { + std::cout << "Testing least upper bound 2: "; + bool result = false; + + LinearSubspace r1 = LinearSubspace(); + r1.isBottom = false; + MatrixType b1 = MatrixType(4); + b1.setValue(0,1, 1); + b1.setValue(2,1, 1); + r1.basis = {b1}; + r1.index = mock_index; + + LinearSubspace r2 = LinearSubspace(); + r2.isBottom = false; + MatrixType b2 = MatrixType(4); + b2.setValue(0,3, 1); + r2.basis = {b2}; + r2.index = mock_index; + + LinearSubspace expected = LinearSubspace(); + MatrixType e1 = MatrixType(4); + e1.setValue(0,3, 1); + MatrixType e2 = MatrixType(4); + e2.setValue(0,0, 0); + e2.setValue(1,1, 0); + e2.setValue(2,2, 0); + e2.setValue(3,3, 0); + e2.setValue(0,1, 1); + e2.setValue(2,1, 1); + e2.setValue(0,3, -1); + expected.basis = {e1, e2}; + + + auto actual = r1.leastUpperBound(r2); + + result = r1.basis == expected.basis && actual && !r1.isBottom; + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + + +int main() { + return !(LinearSubspaceTest::runTestLeastUpperBound1() + && LinearSubspaceTest::runTestLeastUpperBound2() + ); +}; + + diff --git a/test/normalized_conjunction_test.cpp b/test/normalized_conjunction_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..88726748ce87045b1e8714bd0b8bceef8550a56b --- /dev/null +++ b/test/normalized_conjunction_test.cpp @@ -0,0 +1,317 @@ +#include +#include +#include +#include +#include + +#include "../src/normalized_conjunction.h" +#include "../src/linear_equality.h" + +using namespace pcpo; +using namespace llvm; + + +class NormalizedConjunctionTest: NormalizedConjunction { + +public: + static bool runTestAll(); + static bool runTestMerge(); + static bool runTestX0(); + static bool runTestX1(); + static bool runTestX2(); + static bool runTestX4(); + static bool runNonDeterministicAssignmentTest1(); + static bool runNonDeterministicAssignmentTest2(); + static bool runLinearAssignmentTest1(); + static bool runLinearAssignmentTest2(); +}; + +const Value *x1 = (Value *) 1; +const Value *x2 = (Value *) 2; +const Value *x3 = (Value *) 3; +const Value *x4 = (Value *) 4; +const Value *x5 = (Value *) 5; +const Value *x6 = (Value *) 6; +const Value *x7 = (Value *) 7; +const Value *x8 = (Value *) 8; +const Value *x9 = (Value *) 9; +const Value *x10 = (Value *) 10; +const Value *x11 = (Value *) 11; +const Value *x12 = (Value *) 12; + +const std::unordered_map E1 = { + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x1, 0}}, + {x4, {x4, 3, x2, 5}}, + {x5, {x5, 3, x1, 15}}, + {x6, {x6, 1, x1, 3}}, + {x7, {x7, 1, x1, 2}}, + {x8, {x8, 7, x1, 15}}, + {x9, {x9, 1, nullptr, 0}}, + {x10, {x10, 1, nullptr, 2}}, + {x11, {x11, 1, nullptr, 1}}, + {x12, {x12, 1, nullptr, 3}} +}; + +const std::unordered_map E2 = { + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x2, -5}}, + {x4, {x4, 3, x2, 5}}, + {x5, {x5, 3, x2, 0}}, + {x6, {x6, 1, x2, 1}}, + {x7, {x7, 1, x2, 0}}, + {x8, {x8, 21, x2, -20}}, + {x9, {x9, 1, nullptr, 1}}, + {x10, {x10, 1, nullptr, 4}}, + {x11, {x11, 2, x1, -3}}, + {x12, {x12, 4, x1, -5}} +}; + +auto mapToSeccond = [](std::pair p){ return p.second; }; + + +bool NormalizedConjunctionTest::runTestAll() { + std::cout << "Testing all: "; + bool result = false; + std::unordered_map expected = { + {x4, {x4, 3, x2, 5}}, + {x5, {x5, 3, x3, 15}}, + {x7, {x7, 1, x6, -1}}, + {x10, {x10, 2, x9, 2}}, + {x12, {x12, 2, x11, 1}} + }; + + auto actual = NormalizedConjunction(E1); + actual.leastUpperBound(NormalizedConjunction(E2)); + + result = actual.values == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runTestMerge() { + std::cout << "Testing merge: "; + bool result = false; + + std::unordered_map x = { + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x1, 0}}, + {x4, {x4, 3, x2, 5}}, + {x5, {x5, 3, x1, 15}}, + {x6, {x6, 1, x1, 3}}, + {x7, {x7, 1, x1, 2}}, + {x8, {x8, 7, x1, 15}}, + {x9, {x9, 1, nullptr, 0}}, + {x10, {x10, 1, nullptr, 2}}, + {x11, {x11, 1, nullptr, 1}}, + {x12, {x12, 1, nullptr, 3}} + }; + + std::unordered_map y = { + + }; + + auto actual = NormalizedConjunction(x); + auto other = NormalizedConjunction(y); + actual.merge(Merge_op::UPPER_BOUND, other); + + result = actual.values == x; + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runTestX0() { + std::cout << "Testing X0: "; + bool result = true; + + std::set expected = { + {x4, 3, x2, 5} + }; + + std::set E1Set, E2Set; + transform(E1, std::inserter(E1Set, E1Set.end()), mapToSeccond); + transform(E2, std::inserter(E2Set, E2Set.end()), mapToSeccond); + + auto actual = NormalizedConjunction::computeX0(E1Set, E2Set); + + result = actual == expected; + std::cout << (result ? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runTestX1() { + std::cout << "Testing X1: "; + bool result = false; + + std::set expected = { + {x10, 2, x9, 2} + }; + + std::set E1Set, E2Set; + transform(E1, std::inserter(E1Set, E1Set.end()), mapToSeccond); + transform(E2, std::inserter(E2Set, E2Set.end()), mapToSeccond); + + auto actual = NormalizedConjunction::computeX1(E1Set, E2Set); + + result = actual == expected; + std::cout << (result ? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runTestX2() { + std::cout << "Testing X2: "; + bool result = false; + + std::set expected = { + {x12, 2, x11, 1} + }; + + std::set E1Set, E2Set; + transform(E1, std::inserter(E1Set, E1Set.end()), mapToSeccond); + transform(E2, std::inserter(E2Set, E2Set.end()), mapToSeccond); + + auto actual = NormalizedConjunctionTest::computeX2(E1Set, E2Set); + + result = actual == expected; + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runTestX4() { + std::cout << "Testing X4: "; + bool result = false; + + std::set expected = { + {x5, 3, x3, 15}, + {x7, 1, x6, -1} + }; + + std::set E1Set, E2Set; + transform(E1, std::inserter(E1Set, E1Set.end()), mapToSeccond); + transform(E2, std::inserter(E2Set, E2Set.end()), mapToSeccond); + + auto actual = NormalizedConjunctionTest::computeX4(E1Set, E2Set); + + result = actual == expected; + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runNonDeterministicAssignmentTest1() { + std::cout << "Testing non deterministic Assignment 1: "; + bool result = false; + + NormalizedConjunction E = NormalizedConjunction({ + {x1, {x1, 1, nullptr, 4}}, + {x2, {x2, 1, nullptr, 2}} + }); + + auto expected = NormalizedConjunction({ + {x1, {x1, 1, nullptr, 4}}, + {x2, {x2, 1, x2, 0}}, + }); + + E.nonDeterminsticAssignment(x2); + + result = E.values == expected.values; + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runNonDeterministicAssignmentTest2() { + std::cout << "Testing non deterministic Assignment 2: "; + bool result = false; + + NormalizedConjunction E = NormalizedConjunction({ + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x1, 2}}, + {x3, {x3, 1, x2, 4}}, + {x4, {x4, 1, x1, 10}} + }); + + auto expected = NormalizedConjunction({ + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x2, 4}}, + {x4, {x4, 1, x2, 8}} + }); + + E.nonDeterminsticAssignment(x1); + + result = E.values == expected.values; + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runLinearAssignmentTest1() { + std::cout << "Testing linear Assignment 1: "; + bool result = false; + + NormalizedConjunction E = NormalizedConjunction({ + {x1, {x1, 1, nullptr, 2}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x2, 3}} + + }); + + auto expected = NormalizedConjunction({ + {x1, {x1, 1, nullptr, 2}}, + {x2, {x2, 1, nullptr, 5}}, + {x3, {x3, 1, x3, 0}} + }); + + E.linearAssignment(x2, 1, x1, 3); + + result = E.values == expected.values; + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +bool NormalizedConjunctionTest::runLinearAssignmentTest2() { + std::cout << "Testing linear Assignment 2: "; + bool result = false; + + NormalizedConjunction E = NormalizedConjunction({ + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x1, 4}}, + {x3, {x3, 1, x3, 0}}, + {x4, {x4, 1, x3, 10}}, + {x5 ,{x5, 1, x3, -4}}, + {x6, {x6, 1, x3, 1}} + }); + + auto expected = NormalizedConjunction({ + {x1, {x1, 1, x1, 0}}, + {x2, {x2, 1, x2, 0}}, + {x3, {x3, 1, x2, -11}}, + {x4, {x4, 1, x2, -1}}, + {x5, {x5, 1, x2, -15}}, + {x6, {x6, 1, x2, -10}} + }); + + E.linearAssignment(x2, 1, x4, 1); + + result = E.values == expected.values; + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +int main() { + + return !(NormalizedConjunctionTest::runTestX0() + && NormalizedConjunctionTest::runTestX1() + && NormalizedConjunctionTest::runTestX2() + && NormalizedConjunctionTest::runTestX4() + && NormalizedConjunctionTest::runTestAll() + && NormalizedConjunctionTest::runTestMerge() + && NormalizedConjunctionTest::runNonDeterministicAssignmentTest1() + && NormalizedConjunctionTest::runNonDeterministicAssignmentTest2() + && NormalizedConjunctionTest::runLinearAssignmentTest1() + && NormalizedConjunctionTest::runLinearAssignmentTest2() + ); +} diff --git a/test/simple_interval_test.cpp b/test/simple_interval_test.cpp index fa0d7d4d13c93e51c7b501b6991b004a6ecf9579..566e134a371653461614734cc7aa2bdca96543ae 100644 --- a/test/simple_interval_test.cpp +++ b/test/simple_interval_test.cpp @@ -1,8 +1,7 @@ - #include #include -#include "simple_interval.h" +#include "../src/simple_interval.h" // Standard integer types using s64 = std::int64_t; @@ -229,19 +228,17 @@ void testSimpleDomain(u32 w, u32 iters, u64* errs) { u64 error_count; int main() { using namespace pcpo; - u64 iters = 64; + u64 iters = 512; // Use this to reproduce a failing example more quickly. Simply insert the // last random hash the script outputs and the correct bitwidth. 
//rand_state = 0xe596fd2a27fe71c7ull; //testSimpleDomain(16, iters, &error_count); - while (true) { - testSimpleDomain( 8, iters, &error_count); - testSimpleDomain(16, iters, &error_count); - testSimpleDomain(17, iters, &error_count); - testSimpleDomain(32, iters, &error_count); - testSimpleDomain(64, iters, &error_count); - iters *= 2; - } -} + testSimpleDomain( 8, iters, &error_count); + testSimpleDomain(16, iters, &error_count); + testSimpleDomain(17, iters, &error_count); + testSimpleDomain(32, iters, &error_count); + testSimpleDomain(64, iters, &error_count); + iters *= 2; +} \ No newline at end of file diff --git a/test/simple_matrix_test.cpp b/test/simple_matrix_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aad39806ee40861ab7908bfcd72c58a01c70da98 --- /dev/null +++ b/test/simple_matrix_test.cpp @@ -0,0 +1,512 @@ +#include +#include +#include +#include +#include + +#include "../src/simple_matrix.h" + +using namespace std; +using namespace pcpo; + +template +class MatrixTest: Matrix { + +public: + static bool runTestMul1(); + static bool runTestMul2(); + static bool runTestTranspose1(); + static bool runTestTranspose2(); + static bool runTestEchelon1(); + static bool runTestEchelon2(); + static bool runTestEchelon3(); + static bool runTestRank1(); + static bool runTestRank2(); + static bool runTestRank3(); + static bool runTestSpan1(); + static bool runTestNull1(); + static bool runTestNull2(); + static bool runTestNull3(); + static bool runTestNull4(); + static bool runTestNull5(); +}; + +template +bool MatrixTest::runTestMul1() { + std::cout << "Testing multiplication 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + std::vector> b = { + {4,29,0}, + {-1,27,2}, + {100,5,3} + }; + + std::vector> expected = { + {700,172,29}, + {803,233,34}, + {906,294,39} + }; + + auto actual = Matrix(a) * Matrix(b); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestMul2() { + std::cout << "Testing multiplication 2: "; + bool result = false; + + std::vector> a = { + {1,6,11}, + {2,7,12}, + {3,8,13}, + {4,9,14}, + {5,10,-9} + }; + + std::vector> b = { + {43,45,1,9}, + {224,7,-2,24}, + {12,1,13,-6} + }; + + std::vector> expected = { + {1519,98,132,87}, + {1798,151,144,114}, + {2077,204,156,141}, + {2356,257,168,168}, + {2347,286,-132,339} + }; + + auto actual = Matrix(a) * Matrix(b); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestTranspose1() { + std::cout << "Testing transpose 1: "; + bool result = false; + + std::vector> a = { + {1,2,3}, + {4,5,6}, + {7,8,9} + }; + + std::vector> expected = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + auto matrix = Matrix(a); + auto actual = matrix.transpose(); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestTranspose2() { + std::cout << "Testing transpose 2: "; + bool result = false; + + std::vector> a = { + {1,4}, + {2,5}, + {3,6} + }; + + std::vector> expected = { + {1,2,3}, + {4,5,6} + }; + + auto matrix = Matrix(a); + auto actual = matrix.transpose(); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestEchelon1() { + std::cout << "Testing echelon 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + std::vector> expected = { + {1,0,-1}, + {0,1,2}, + {0,0,0} + }; + + auto matrix = Matrix(a); + auto actual = matrix.echelonForm(); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestEchelon2() { + std::cout << "Testing echelon 2: "; + bool result = false; + + std::vector> a = { + {1,2,1}, + {1,4,8}, + {1,6,3} + }; + + std::vector> expected = { + {1,0,0}, + {0,1,0}, + {0,0,1} + }; + + auto matrix = Matrix(a); + auto actual = matrix.echelonForm(); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestEchelon3() { + std::cout << "Testing echelon 3: "; + bool result = false; + + std::vector> a = { + {1,2,4}, + {2,4,8}, + {4,8,16} + }; + + std::vector> expected = { + {1,2,4}, + {0,0,0}, + {0,0,0} + }; + + auto matrix = Matrix(a); + auto actual = matrix.echelonForm(); + + auto x = Matrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestRank1() { + std::cout << "Testing rank 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + int expected = 2; + + auto matrix = Matrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestRank2() { + std::cout << "Testing rank 2: "; + bool result = false; + + std::vector> a = { + {1,2,4}, + {2,4,8}, + {4,8,16} + }; + + int expected = 1; + + auto matrix = Matrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestRank3() { + std::cout << "Testing rank 3: "; + bool result = false; + + std::vector> a = { + {1,2,1}, + {1,4,8}, + {1,6,3} + }; + + int expected = 3; + + auto matrix = Matrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestSpan1() { + std::cout << "Testing span 1: "; + bool result = false; + + std::vector> a = { + {1,1,4}, + {0,1,4}, + {1,0,0} + }; + + std::vector> expected = { + {1,0}, + {0,1}, + {1,-1} + }; + + auto matrix = Matrix(a); + auto actual = Matrix::span(matrix); + + + result = actual == Matrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestNull1() { + std::cout << "Testing nullspace 1: "; + bool result = false; + + + std::vector> a = { + {1,0,0}, + {0,1,0}, + {0,0,1} + }; + + auto matrix = Matrix(a); + auto actual = Matrix::null(matrix); + Matrix expected = Matrix({}); + + result = actual == Matrix(expected); + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestNull2() { + std::cout << "Testing nullspace 2: "; + bool result = false; + + + std::vector> a = { + {1,-10, -24, -42}, + {1,-8,-18,-32}, + {-2,20,51,87} + }; + + std::vector> b = { + {2}, + {2}, + {1}, + {-1} + }; + + auto matrix = Matrix(a); + auto actual = Matrix::null(matrix); + auto expected = Matrix(b); + + result = actual == Matrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestNull3() { + std::cout << "Testing nullspace 3: "; + bool result = false; + + + std::vector> a = { + {0,1,0,0,-2,-13}, + {0,0,0,1, 2, 5}, + {0,0,1,0, 1, 9} + }; + + std::vector> b = { + {-1, 0, 0}, + {0, -2, -13}, + {0, 1, 9}, + {0, 2, 5}, + {0, -1, 0}, + {0, 0, -1} + }; + + auto matrix = Matrix(a); + auto actual = Matrix::null(matrix); + auto expected = Matrix(b); + + result = actual == Matrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestNull4() { + std::cout << "Testing nullspace 4: "; + bool result = false; + + + std::vector> a = { + {0,0,1,0,0,0,0,-2,-13}, + {0,0,0,0,0,0,1, 2, 5}, + {0,0,0,0,0,1,0, 1, 9} + }; + + std::vector> b = { + {-1,0,0,0, 0, 0}, + {0,-1,0,0, 0, 0}, + {0,0,0,0, -2,-13}, + {0,0,-1,0, 0, 0}, + {0,0,0,-1, 0, 0}, + {0,0,0,0,1,9}, + {0,0,0,0,2,5}, + {0,0,0,0,-1, 0}, + {0,0,0,0, 0, -1} + }; + + auto matrix = Matrix(a); + auto actual = Matrix::null(matrix); + auto expected = Matrix(b); + + result = actual == Matrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool MatrixTest::runTestNull5() { + std::cout << "Testing nullspace 5: "; + bool result = false; + + + std::vector> a = { + {0,1,1}, + {0,0,1}, + {0,0,0} + }; + + std::vector> b = { + {0,0,0}, + {0,0,1}, + {0,0,0} + }; + + std::vector> ans = { + {-1}, + {0}, + {0} + }; + + + auto A = Matrix(a); + auto B = Matrix(b); + auto actual = Matrix::null(Matrix(std::vector{A,B})); + auto expected = Matrix(ans); + + result = actual == Matrix(expected); + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + + + +int main() { + return !(MatrixTest::runTestMul1() + && MatrixTest::runTestMul2() + && MatrixTest::runTestTranspose1() + && MatrixTest::runTestTranspose2() + && MatrixTest::runTestEchelon1() + && MatrixTest::runTestEchelon2() + && MatrixTest::runTestEchelon3() + && MatrixTest::runTestRank1() + && MatrixTest::runTestRank2() + && MatrixTest::runTestRank3() + && MatrixTest::runTestSpan1() + && MatrixTest::runTestMul2() + && MatrixTest::runTestTranspose1() + && MatrixTest::runTestTranspose2() + && MatrixTest::runTestEchelon1() + && MatrixTest::runTestEchelon2() + && MatrixTest::runTestEchelon3() + && MatrixTest::runTestRank1() + && MatrixTest::runTestRank2() + && MatrixTest::runTestRank3() + && MatrixTest::runTestSpan1() + && MatrixTest::runTestNull1() + && MatrixTest::runTestNull2() + && MatrixTest::runTestNull3() + && MatrixTest::runTestNull4() + && MatrixTest::runTestNull5() + ); +}; + diff --git a/test/sparse_matrix_test.cpp b/test/sparse_matrix_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..53162cb09d7f6e95394e460205f2e00ee385044b --- /dev/null +++ b/test/sparse_matrix_test.cpp @@ -0,0 +1,515 @@ +#include +#include +#include +#include +#include + +#include "../src/sparse_matrix.h" + +using namespace std; +using namespace pcpo; + +template +class SparseMatrixTest: SparseMatrix { + +public: + static bool runTestMul1(); + static bool runTestMul2(); + static bool runTestTranspose1(); + static bool runTestTranspose2(); + static bool runTestEchelon1(); + static bool runTestEchelon2(); + static bool runTestEchelon3(); + static bool runTestRank1(); + static bool runTestRank2(); + static bool runTestRank3(); + static bool runTestSpan1(); + static bool runTestNull1(); + static bool runTestNull2(); + static bool runTestNull3(); + static bool runTestNull4(); + static bool runTestNull5(); +}; + +template +bool SparseMatrixTest::runTestMul1() { + std::cout << "Testing multiplication 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + std::vector> b = { + {4,29,0}, + {-1,27,2}, + {100,5,3} + }; + + std::vector> expected = { + {700,172,29}, + {803,233,34}, + {906,294,39} + }; + + auto actual = SparseMatrix(a) * SparseMatrix(b); + + auto x = SparseMatrix(expected); + result = actual == x; + + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestMul2() { + std::cout << "Testing multiplication 2: "; + bool result = false; + + std::vector> a = { + {1,6,11}, + {2,7,12}, + {3,8,13}, + {4,9,14}, + {5,10,-9} + }; + + std::vector> b = { + {43,45,1,9}, + {224,7,-2,24}, + {12,1,13,-6} + }; + + std::vector> expected = { + {1519,98,132,87}, + {1798,151,144,114}, + {2077,204,156,141}, + {2356,257,168,168}, + {2347,286,-132,339} + }; + + auto actual = SparseMatrix(a) * SparseMatrix(b); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestTranspose1() { + std::cout << "Testing transpose 1: "; + bool result = false; + + std::vector> a = { + {1,2,3}, + {4,5,6}, + {7,8,9} + }; + + std::vector> expected = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + auto matrix = SparseMatrix(a); + + auto actual = matrix.transpose(); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestTranspose2() { + std::cout << "Testing transpose 2: "; + bool result = false; + + std::vector> a = { + {1,4}, + {2,5}, + {3,6} + }; + + std::vector> expected = { + {1,2,3}, + {4,5,6} + }; + + auto matrix = SparseMatrix(a); + auto actual = matrix.transpose(); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestEchelon1() { + std::cout << "Testing echelon 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + std::vector> expected = { + {1,0,-1}, + {0,1,2}, + {0,0,0} + }; + + auto matrix = SparseMatrix(a); + auto actual = matrix.echelonForm(); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestEchelon2() { + std::cout << "Testing echelon 2: "; + bool result = false; + + std::vector> a = { + {1,2,1}, + {1,4,8}, + {1,6,3} + }; + + std::vector> expected = { + {1,0,0}, + {0,1,0}, + {0,0,1} + }; + + auto matrix = SparseMatrix(a); + auto actual = matrix.echelonForm(); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestEchelon3() { + std::cout << "Testing echelon 3: "; + bool result = false; + + std::vector> a = { + {1,2,4}, + {2,4,8}, + {4,8,16} + }; + + std::vector> expected = { + {1,2,4}, + {0,0,0}, + {0,0,0} + }; + + auto matrix = SparseMatrix(a); + auto actual = matrix.echelonForm(); + + auto x = SparseMatrix(expected); + result = actual == x; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestRank1() { + std::cout << "Testing rank 1: "; + bool result = false; + + std::vector> a = { + {1,4,7}, + {2,5,8}, + {3,6,9} + }; + + int expected = 2; + + auto matrix = SparseMatrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestRank2() { + std::cout << "Testing rank 2: "; + bool result = false; + + std::vector> a = { + {1,2,4}, + {2,4,8}, + {4,8,16} + }; + + int expected = 1; + + auto matrix = SparseMatrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestRank3() { + std::cout << "Testing rank 3: "; + bool result = false; + + std::vector> a = { + {1,2,1}, + {1,4,8}, + {1,6,3} + }; + + int expected = 3; + + auto matrix = SparseMatrix(a); + auto actual = matrix.getRank(); + + result = actual == expected; + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestSpan1() { + std::cout << "Testing span 1: "; + bool result = false; + + std::vector> a = { + {1,1,4}, + {0,1,4}, + {1,0,0} + }; + + std::vector> expected = { + {1,0}, + {0,1}, + {1,-1} + }; + + auto matrix = SparseMatrix(a); + auto actual = SparseMatrix::span(matrix); + + + result = actual == SparseMatrix(expected); + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestNull1() { + std::cout << "Testing nullspace 1: "; + bool result = false; + + + std::vector> a = { + {1,0,0}, + {0,1,0}, + {0,0,1} + }; + + auto matrix = SparseMatrix(a); + auto actual = SparseMatrix::null(matrix); + SparseMatrix expected = SparseMatrix({}); + + result = actual == SparseMatrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestNull2() { + std::cout << "Testing nullspace 2: "; + bool result = false; + + + std::vector> a = { + {1,-10, -24, -42}, + {1,-8,-18,-32}, + {-2,20,51,87} + }; + + std::vector> b = { + {2}, + {2}, + {1}, + {-1} + }; + + auto matrix = SparseMatrix(a); + auto actual = SparseMatrix::null(matrix); + auto expected = SparseMatrix(b); + + result = actual == SparseMatrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestNull3() { + std::cout << "Testing nullspace 3: "; + bool result = false; + + + std::vector> a = { + {0,1,0,0,-2,-13}, + {0,0,0,1, 2, 5}, + {0,0,1,0, 1, 9} + }; + + std::vector> b = { + {-1, 0, 0}, + {0, -2, -13}, + {0, 1, 9}, + {0, 2, 5}, + {0, -1, 0}, + {0, 0, -1} + }; + + auto matrix = SparseMatrix(a); + auto actual = SparseMatrix::null(matrix); + auto expected = SparseMatrix(b); + + result = actual == SparseMatrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestNull4() { + std::cout << "Testing nullspace 4: "; + bool result = false; + + + std::vector> a = { + {0,0,1,0,0,0,0,-2,-13}, + {0,0,0,0,0,0,1, 2, 5}, + {0,0,0,0,0,1,0, 1, 9} + }; + + std::vector> b = { + {-1,0,0,0, 0, 0}, + {0,-1,0,0, 0, 0}, + {0,0,0,0, -2,-13}, + {0,0,-1,0, 0, 0}, + {0,0,0,-1, 0, 0}, + {0,0,0,0,1,9}, + {0,0,0,0,2,5}, + {0,0,0,0,-1, 0}, + {0,0,0,0, 0, -1} + }; + + auto matrix = SparseMatrix(a); + auto actual = SparseMatrix::null(matrix); + auto expected = SparseMatrix(b); + + result = actual == SparseMatrix(expected); + + std::cout << (result? "success" : "failed") << "\n"; + return result; +} + +template +bool SparseMatrixTest::runTestNull5() { + std::cout << "Testing nullspace 5: "; + bool result = false; + + + std::vector> a = { + {0,1,1}, + {0,0,1}, + {0,0,0} + }; + + std::vector> b = { + {0,0,0}, + {0,0,1}, + {0,0,0} + }; + + std::vector> ans = { + {-1}, + {0}, + {0} + }; + + + auto A = SparseMatrix(a); + auto B = SparseMatrix(b); + auto actual = SparseMatrix::null(SparseMatrix(std::vector{A,B})); + auto expected = SparseMatrix(ans); + + result = actual == SparseMatrix(expected); + + std::cout << (result? 
"success" : "failed") << "\n"; + return result; +} + + + +int main() { + return !(SparseMatrixTest::runTestMul1() + && SparseMatrixTest::runTestMul2() + && SparseMatrixTest::runTestTranspose1() + && SparseMatrixTest::runTestTranspose2() + && SparseMatrixTest::runTestEchelon1() + && SparseMatrixTest::runTestEchelon2() + && SparseMatrixTest::runTestEchelon3() + && SparseMatrixTest::runTestRank1() + && SparseMatrixTest::runTestRank2() + && SparseMatrixTest::runTestRank3() + && SparseMatrixTest::runTestSpan1() + && SparseMatrixTest::runTestMul2() + && SparseMatrixTest::runTestTranspose1() + && SparseMatrixTest::runTestTranspose2() + && SparseMatrixTest::runTestEchelon1() + && SparseMatrixTest::runTestEchelon2() + && SparseMatrixTest::runTestEchelon3() + && SparseMatrixTest::runTestRank1() + && SparseMatrixTest::runTestRank2() + && SparseMatrixTest::runTestRank3() + && SparseMatrixTest::runTestSpan1() + && SparseMatrixTest::runTestNull1() + && SparseMatrixTest::runTestNull2() + && SparseMatrixTest::runTestNull3() + && SparseMatrixTest::runTestNull4() + && SparseMatrixTest::runTestNull5() + ); +}; + +