diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1d9d7fa --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*.lst +*.bin +*.pt +*.exp +*.out +*.diff diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..1893bb1 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,263 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +cmake_minimum_required(VERSION 2.8.6) + +project(PT) + +# versioning +# +# the major and the minor number define the supported Intel PT set. +# +# a build number and a version extension can be optionally specified. +# +set(PT_VERSION_MAJOR 1) +set(PT_VERSION_MINOR 6) +set(PT_VERSION_BUILD "1" CACHE STRING "") +set(PT_VERSION_EXT "" CACHE STRING "") + +set(PT_VERSION "${PT_VERSION_MAJOR}.${PT_VERSION_MINOR}.${PT_VERSION_BUILD}") + +add_definitions( + -DPT_VERSION_MAJOR=${PT_VERSION_MAJOR} + -DPT_VERSION_MINOR=${PT_VERSION_MINOR} + -DPT_VERSION_BUILD=${PT_VERSION_BUILD} + -DPT_VERSION_EXT=\"${PT_VERSION_EXT}\" +) + +include(GNUInstallDirs) +include(FindUnixCommands) + +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(MAN_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/man) + +set(CMAKE_COLOR_MAKEFILE OFF) +set(CMAKE_VERBOSE_MAKEFILE ON) + +set(CMAKE_MACOSX_RPATH ON) + +option(FEATURE_THREADS "A small amount of multi-threading support." ON) +if (FEATURE_THREADS) + add_definitions(-DFEATURE_THREADS) +endif (FEATURE_THREADS) + +option(DEVBUILD "Enable compiler warnings and turn them into errors." OFF) + +option(PTDUMP "Enable ptdump, a packet dumper") +option(PTXED "Enable ptxed, an instruction flow dumper") +option(PTTC "Enable pttc, a test compiler") +option(PTUNIT "Enable ptunit, a unit test system and libipt unit tests") +option(MAN "Enable man pages (requires pandoc)." 
OFF) + +set(PTT OFF) +if (BASH AND PTDUMP AND PTXED AND PTTC) + set(PTT ON) +endif () + +if (PTUNIT OR PTT) + ENABLE_TESTING() +endif() + +include_directories( + include + ${CMAKE_CURRENT_BINARY_DIR}/libipt/include +) + +if (PTUNIT) + include_directories( + ptunit/include + ) +endif (PTUNIT) + +if (CMAKE_HOST_WIN32) + include_directories( + include/windows + ) + + add_definitions( + # cl spells inline __inline in C + # + /Dinline=__inline + + # cl spells strtoll _strtoi64 + # + /Dstrtoll=_strtoi64 + + # cl spells strtoull _strtoui64 + # + /Dstrtoull=_strtoui64 + + # avoid annoying warnings about unsecure standard functions + # + /D_CRT_SECURE_NO_WARNINGS + ) + + # enable parallel build + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") + + if (DEVBUILD) + # compiler warnings + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") + + # warnings are errors + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /WX") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") + endif (DEVBUILD) + + if (CMAKE_C_COMPILER_ID MATCHES "MSVC") + # prevent complaints on: + # - do {} while(0) constructs + # - int arr[] constructs + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4200") + + endif (CMAKE_C_COMPILER_ID MATCHES "MSVC") + + if (CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + # prevent complaints on: + # - do {} while(0) constructs + # - int arr[] constructs + # + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4127") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4200") + + endif (CMAKE_CXX_COMPILER_ID MATCHES "MSVC") + +endif (CMAKE_HOST_WIN32) + +if (CMAKE_HOST_UNIX) + include_directories( + include/posix + ) + + if (CMAKE_C_COMPILER_ID MATCHES "Clang") + add_definitions( + # make asm directive work in c99 mode. + # + # from the clang user manual: + # "The parser recognizes "asm" and "typeof" as keywords in gnu* modes; + # the variants "__asm__" and "__typeof__" are recognized in all + # modes." + -Dasm=__asm__ + ) + endif (CMAKE_C_COMPILER_ID MATCHES "Clang") + + option(GCOV "Compile for GNU code coverage analysis." 
OFF) + + if (GCOV) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ftest-coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftest-coverage") + + link_libraries(gcov) + endif (GCOV) + + if (FEATURE_THREADS) + link_libraries(pthread) + endif (FEATURE_THREADS) + + # set the language + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") + + # windows-like dll export model + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") + + if (DEVBUILD) + # compiler warnings + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wextra") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic") + + # warnings are errors + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + endif (DEVBUILD) + +endif (CMAKE_HOST_UNIX) + + +function(add_ptunit_test_base name) + if (PTUNIT) + add_executable(${name} ${ARGN}) + target_link_libraries(${name} ptunit) + + add_test(NAME ${name} COMMAND ${name}) + endif (PTUNIT) +endfunction(add_ptunit_test_base) + +function(add_ptunit_c_test name) + add_ptunit_test_base(ptunit-${name} test/src/ptunit-${name}.c ${ARGN}) +endfunction(add_ptunit_c_test) + +function(add_ptunit_cpp_test name) + add_ptunit_test_base(ptunit-${name} test/src/ptunit-${name}.cpp ${ARGN}) +endfunction(add_ptunit_cpp_test) + +function(add_ptunit_libraries name) + if (PTUNIT) + target_link_libraries(ptunit-${name} ${ARGN}) + endif (PTUNIT) +endfunction(add_ptunit_libraries) + + +add_subdirectory(libipt) + +if (PTDUMP) + add_subdirectory(ptdump) +endif (PTDUMP) +if (PTXED) + add_subdirectory(ptxed) +endif (PTXED) +if (PTTC) + add_subdirectory(pttc) +endif (PTTC) +if (PTUNIT) + add_subdirectory(ptunit) +endif (PTUNIT) +if (PTT) + add_subdirectory(test) +endif (PTT) +if (MAN) + add_subdirectory(doc/man) +endif (MAN) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..3e50456 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013-2017, Intel Corporation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/README b/README new file mode 100644 index 0000000..d3a0251 --- /dev/null +++ b/README @@ -0,0 +1,75 @@ +Intel(R) Processor Trace Decoder Library +======================================== + +The Intel Processor Trace (Intel PT) Decoder Library is Intel's reference +implementation for decoding Intel PT. It can be used as a standalone library or +it can be partially or fully integrated into your tool. + +The library comes with a set of sample tools built on top of it and a test +system built on top of the sample tools. The samples demonstrate how to use the +library and may serve as a starting point for integrating the library into your +tool. + +Go to https://software.intel.com/en-us/intel-platform-analysis-library for +support of upcoming (non-public) processors (NDA required). + + +Contents +-------- + + README this file + + libipt A packet encoder/decoder library + + +Optional Contents and Samples +----------------------------- + + ptdump Example implementation of a packet dumper + + ptxed Example implementation of a trace disassembler + + pttc A trace test generator + + ptunit A simple unit test system + + script A collection of scripts + + test A collection of tests + + include A collection of substitute headers + + doc A document describing the build + A document describing how to get started + A document describing the usage of the decoder library + A document describing how to capture trace + A document describing pttc + + doc/man Man pages for the encoder/decoder library + + +Dependencies +------------ + +We use cmake for building. + + cmake The cross-platform open-source build system. + http://www.cmake.org + + +Other packages you need for some of the above optional components. + + xed The Intel x86 instruction encoder and decoder. + http://www.intel.com/software/xed + + This is needed to build and run ptxed. + + yasm The Yasm Modular Assembler + http://github.com/yasm + + This is needed to run pttc. + + pandoc A universal document converter + http://pandoc.org + + This is needed for man pages. diff --git a/doc/getting_started.md b/doc/getting_started.md new file mode 100755 index 0000000..ad142de --- /dev/null +++ b/doc/getting_started.md @@ -0,0 +1,93 @@ +Getting Started {#start} +======================== + + + +This chapter gives a brief introduction into the sample tools using one of the +tests as example. It assumes that you are already familiar with Intel(R) +Processor Trace (Intel PT) and that you already built the decoder library and +the sample tools. For detailed information about Intel PT, please refer to +chapter 11 of the Intel Architecture Instruction Set Extensions Programming +Reference at http://www.intel.com/products/processor/manuals/. + +Start by compiling the loop-tnt test. 
It consists of a small assembly program +with interleaved Intel PT directives: + + $ pttc test/src/loop-tnt.ptt + loop-tnt-ptxed.exp + loop-tnt-ptdump.exp + +This produces the following output files: + + loop-tnt.lst a yasm assembly listing file + loop-tnt.bin a raw binary file + loop-tnt.pt an Intel PT file + loop-tnt-ptxed.exp the expected ptxed output + loop-tnt-ptdump.exp the expected ptdump output + +The latter two files are generated based on the `@pt .exp()` directives +found in the `.ptt` file. They are used for automated testing. See +script/test.bash for details on that. + + +Use `ptdump` to dump the Intel PT packets: + + $ ptdump loop-tnt.pt + 0000000000000000 psb + 0000000000000010 fup 3: 0x0000000000100000, ip=0x0000000000100000 + 0000000000000017 mode.exec cs.d=0, cs.l=1 (64-bit mode) + 0000000000000019 psbend + 000000000000001b tnt8 !!. + 000000000000001c tip.pgd 3: 0x0000000000100013, ip=0x0000000000100013 + +The ptdump tool takes an Intel PT file as input and dumps the packets in +human-readable form. The number on the very left is the offset into the Intel +PT packet stream in hex. This is followed by the packet opcode and payload. + + +Use `ptxed` for reconstructing the execution flow. For this, you need the Intel +PT file as well as the corresponding binary image. You need to specify the load +address given by the org directive in the .ptt file when using a raw binary +file. + + $ ptxed --pt loop-tnt.pt --raw loop-tnt.bin:0x100000 + 0x0000000000100000 mov rax, 0x0 + 0x0000000000100007 jmp 0x10000d + 0x000000000010000d cmp rax, 0x1 + 0x0000000000100011 jle 0x100009 + 0x0000000000100009 add rax, 0x1 + 0x000000000010000d cmp rax, 0x1 + 0x0000000000100011 jle 0x100009 + 0x0000000000100009 add rax, 0x1 + 0x000000000010000d cmp rax, 0x1 + 0x0000000000100011 jle 0x100009 + [disabled] + +Ptxed prints disassembled instructions in execution order as well as status +messages enclosed in brackets. diff --git a/doc/howto_build.md b/doc/howto_build.md new file mode 100755 index 0000000..01a8811 --- /dev/null +++ b/doc/howto_build.md @@ -0,0 +1,191 @@ +Building the Intel(R) Processor Trace (Intel PT) Decoder Library and Samples {#build} +============================================================================ + + + +This chapter gives step-by-step instructions for building the library and the +sample tools using cmake. For detailed information on cmake, see +http://www.cmake.org. + + +## Configuration + +Besides the standard cmake options of build type and install directory, you will +find project-specific options for enabling optional features, optional +components, or optional build variants. + + +### Optional Components + +By default, only the decoder library is built. Other components can be enabled +by setting the respective cmake variable to ON. + +The following optional components are available: + + PTUNIT A simple unit test framework. + A collection of unit tests for libipt. + + PTDUMP A packet dumper example. + + PTXED A trace disassembler example. + + PTTC A trace test generator. + + +### Optional Features + +Features are enabled by setting the respective FEATURE_ cmake variable. +This causes the FEATURE_ pre-processor macro to be defined and may also +cause additional source files to be compiled and additional libraries to be +linked. + +Features are enabled globally and will be used by all components that support +the feature. The following features are supported: + + FEATURE_ELF Support for the ELF object format. + + This feature requires the elf.h header.
+ + + FEATURE_THREADS Support some amount of multi-threading. + + This feature makes image functions thread-safe. + + +### Build Variants + +Some build variants depend on libraries or header files that may not be +available on all supported platforms. + + GCOV Support for code coverage using libgcov. + + This build variant requires libgcov and is not available + on Windows. + + + DEVBUILD Enable compiler warnings and turn them into errors. + + +### Version Settings + +The major and minor version numbers are set in the sources and must be changed +there. You can set the build number and an arbitrary extension string at +build time. + + PT_VERSION_BUILD The build number. + + Defaults to zero. + + + PT_VERSION_EXT An arbitrary version extension string. + + Defaults to the empty string. + + +### Dependencies + +In order to build ptxed, the location of the XED library and the XED header +files must be specified. + + XED_INCLUDE Path to the directory containing the XED header files. + + XED_LIBDIR Path to the directory containing the XED library. + + +When using XED from a PIN distribution, the respective directories are located +in `extras/xed2-/`. + + +## Building on Linux``*`` and OS X``*`` + +We recommend out-of-tree builds. Start by creating the destination directory +and navigating into it: + + $ mkdir -p /path/to/dest + $ cd /path/to/dest + + +From here, call cmake with the top-level source directory as argument. You may +already pass some or all of the cmake variables as arguments to cmake. Without +arguments, cmake uses default values. + + $ cmake /path/to/src + + +If you have not passed values for XED_INCLUDE or XED_LIBDIR, you need to +configure them now if you want to build ptxed. You may also use this command to +change the configuration at any time later on. + + $ make edit_cache + + +After configuring the cmake cache, you can build either specific targets or +everything using one of: + + $ make + $ make + + +Use the help make target to learn about available make targets: + + $ make help + + + +## Building on Windows``*`` + +We recommend using the cmake GUI. After starting the cmake GUI, fill in the +following fields: + + Where is the source code: Path to the top-level source directory. + + Where to build the binaries: Path to the destination directory. + + +We recommend out-of-tree builds, so the build directory should not be the same +as or below the source directory. After this first configuration step, press +the + + Configure + +button and select the builder you want to use. + +Cmake will now populate the remainder of the window with configuration options. +Please make sure to specify at least XED_INCLUDE and XED_LIBDIR if you want to +build ptxed. After completing the configuration, press the + + Generate + +button. If you selected a Visual Studio generator in the first step, cmake will +now generate a Visual Studio solution. You can repeat this step if you want to +change the configuration later on. Beware that you always need to press the +Generate button after changing the configuration. + +In the case of a Visual Studio generator, you may now open the generated Visual +Studio solution and build the library and samples. diff --git a/doc/howto_capture.md b/doc/howto_capture.md new file mode 100644 index 0000000..2bf76d8 --- /dev/null +++ b/doc/howto_capture.md @@ -0,0 +1,248 @@ +Capturing Intel(R) Processor Trace (Intel PT) {#capture} +============================================= + + + +This chapter describes how to capture Intel PT for processing with libipt.
For +illustration, we use the sample tools ptdump and ptxed. + + +## Capturing Intel PT on Linux + +Starting with version 4.1, the Linux kernel supports Intel PT via the perf_event +kernel interface. Starting with version 4.3, the perf user-space tool will +support Intel PT as well. + + +### Capturing Intel PT via Linux perf_event + +We start with setting up a perf_event_attr object for capturing Intel PT. The +structure is declared in `/usr/include/linux/perf_event.h`. + +The Intel PT PMU type is dynamic. Its value can be read from +`/sys/bus/event_source/devices/intel_pt/type`. + +~~~{.c} + struct perf_event_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.size = sizeof(attr); + attr.type = (); + + attr.exclude_kernel = 1; + ... +~~~ + + +Once all desired fields have been set, we can open a perf_event counter for +Intel PT. See `man 2 perf_event_open` for details. In our example, we +configure it for tracing a single thread. + +The system call returns a file descriptor on success, `-1` otherwise. + +~~~{.c} + int fd; + + fd = syscall(SYS_perf_event_open, &attr, , -1, -1, 0); +~~~ + + +The Intel PT trace is captured in the AUX area, which has been introduced with +kernel 4.1. The DATA area contains sideband information such as image changes +that are necessary for decoding the trace. + +In theory, both areas can be configured as circular buffers or as linear buffers +by mapping them read-only or read-write, respectively. When configured as +circular buffer, new data will overwrite older data. When configured as linear +buffer, the user is expected to continuously read out the data and update the +buffer's tail pointer. New data that do not fit into the buffer will be +dropped. + +When using the AUX area, its size and offset have to be filled into the +`perf_event_mmap_page`, which is mapped together with the DATA area. This +requires the DATA area to be mapped read-write and hence configured as linear +buffer. In our example, we configure the AUX area as circular buffer. + +Note that the size of both the AUX and the DATA area has to be a power of two +pages. The DATA area needs one additional page to contain the +`perf_event_mmap_page`. + +~~~{.c} + struct perf_event_mmap_page *header; + void *base, *data, *aux; + + base = mmap(NULL, (1+2**n) * PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 0); + if (base == MAP_FAILED) + return (); + + header = base; + data = base + header->data_offset; + + header->aux_offset = header->data_offset + header->data_size; + header->aux_size = (2**m) * PAGE_SIZE; + + aux = mmap(NULL, header->aux_size, PROT_READ, MAP_SHARED, fd, + header->aux_offset); + if (aux == MAP_FAILED) + return (); +~~~ + + +### Capturing Intel PT via the perf user-space tool + +Starting with kernel 4.3, the perf user-space tool can be used to capture Intel +PT with the `intel_pt` event. See tools/perf/Documentation in the Linux kernel +tree for further information. In this text, we describe how to use the captured +trace with the ptdump and ptxed sample tools. + +We start with capturing some Intel PT trace using the intel_pt event. + +~~~{.sh} + $ perf record -e intel_pt//u --per-thread -- grep -r foo /usr/include + [ perf record: Woken up 26 times to write data ] + [ perf record: Captured and wrote 51.969 MB perf.data ] +~~~ + + +This generates a `perf.data` file that contains the Intel PT trace, the sideband +information, and some metadata. To process the trace with libipt, we need to +extract the Intel PT trace into one file per thread or cpu. 
+ +Looking at the raw trace dump of `perf script -D`, we notice +`PERF_RECORD_AUXTRACE` records. The raw Intel PT trace is contained directly +after such records. We can extract it with the `dd` command. The arguments to +`dd` can be computed from the record's fields. This can be done automatically, +for example with an AWK script. + +~~~{.awk} + /PERF_RECORD_AUXTRACE / { + offset = strtonum($1) + hsize = strtonum(substr($2, 2)) + size = strtonum($5) + idx = strtonum($11) + + ofile = sprintf("perf.data-aux-idx%d.bin", idx) + begin = offset + hsize + + cmd = sprintf("dd if=perf.data of=%s conv=notrunc oflag=append ibs=1 \ + skip=%d count=%d status=none", ofile, begin, size) + + system(cmd) + } +~~~ + +The libipt tree contains such a script in `script/perf-read-aux.bash`. + +In addition to the Intel PT trace, we need the traced memory image. When +tracing a single process where the memory image does not change during tracing, +we can construct the memory image by examining `PERF_RECORD_MMAP` and +`PERF_RECORD_MMAP2` records. This can again be done automatically, for example +with an AWK script. + +~~~{.awk} + function handle_mmap(file, vaddr) { + if (match(file, /\[.*\]/) != 0) { + # ignore 'virtual' file names like [kallsyms] + } + else if (match(file, /\.ko$/) != 0) { + # ignore kernel objects + # + # use /proc/kcore + } + else { + printf(" --elf %s:0x%x", file, vaddr) + } + } + + /PERF_RECORD_MMAP / { + vaddr = strtonum(substr($5, 2)) + file = $9 + + handle_mmap(file, vaddr) + } + + /PERF_RECORD_MMAP2 / { + vaddr = strtonum(substr($5, 2)) + file = $12 + + handle_mmap(file, vaddr) + } +~~~ + +The above script generates options for the `ptxed` sample tool. The libipt tree +contains such a script in `script/perf-read-image.bash`. + +Let's put it all together. + +~~~{.sh} + $ perf record -e intel_pt//u --per-thread -- grep -r foo /usr/include + [ perf record: Woken up 26 times to write data ] + [ perf record: Captured and wrote 51.969 MB perf.data ] + $ script/perf-read-aux.bash + $ script/perf-read-image.bash | xargs ptxed --cpu 6/61 --pt perf.data-aux-idx0.bin +~~~ + + +### Sideband support + +The above example does not consider sideband information. It therefore only +works for not-too-complicated single-threaded applications. For tracing +multi-threaded applications or for system-wide tracing (including ring-3), +sideband information is required for decoding the trace. + +Sideband information can be defined as any information necessary for decoding +Intel PT that is not contained in the trace stream itself. We already supply: + + * the binary files whose execution was traced and the virtual address at which + each file was loaded + * the family/model/stepping of the processor on which the trace was recorded + * some information regarding timing + + +What's missing is information about changes to the traced memory image while the +trace is being recorded: + + * memory map/unamp information + * context switch information + + +On Linux, this information can be found in the form of PERF_EVENT records in the +DATA buffer or in the perf.data file respectively. + +Collection and interpretation of this information is currently left completely +to the user. + + +### Capturing Intel PT via Simple-PT + +The Simple-PT project on github supports capturing Intel PT on Linux with an +alternative kernel driver. The spt decoder supports sideband information. + +See the project's page at https://github.com/andikleen/simple-pt for more +information including examples. 
diff --git a/doc/howto_libipt.md b/doc/howto_libipt.md new file mode 100644 index 0000000..540412d --- /dev/null +++ b/doc/howto_libipt.md @@ -0,0 +1,1025 @@ +Decoding Intel(R) Processor Trace Using libipt {#libipt} +======================================================== + + + +This chapter describes how to use libipt for various tasks around Intel +Processor Trace (Intel PT). For code examples, refer to the sample tools that +are contained in the source tree: + + * *ptdump* A packet dumper example. + * *ptxed* A control-flow reconstruction example. + * *pttc* A packet encoder example. + + +For detailed information about Intel PT, please refer to chapter 36 of the Intel +Software Developer's Manual at http://www.intel.com/sdm. + + +## Introduction + +The libipt decoder library provides multiple layers of abstraction ranging from +packet encoding and decoding to full execution flow reconstruction. The layers +are organized as follows: + + * *packets* This layer deals with raw Intel PT packets. + + * *events* This layer deals with packet combinations that + encode higher-level events. + + * *instruction flow* This layer deals with the execution flow on the + instruction level. + + * *block* This layer deals with the execution flow on the + instruction level. + + It is faster than the instruction flow decoder but + requires a small amount of post-processing. + + +Each layer provides its own encoder or decoder struct plus a set of functions +for allocating and freeing encoder or decoder objects and for synchronizing +decoders onto the Intel PT packet stream. Function names are prefixed with +`pt__` where `` is an abbreviation of the layer name. The following +abbreviations are used: + + * *enc* Packet encoding (packet layer). + * *pkt* Packet decoding (packet layer). + * *qry* Event (or query) layer. + * *insn* Instruction flow layer. + * *blk* Block layer. + + +Here is some generic example code for working with decoders: + +~~~{.c} + struct pt__decoder *decoder; + struct pt_config config; + int errcode; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + config... + + decoder = pt__alloc_decoder(&config); + if (!decoder) + (errcode); + + errcode = pt__sync_(decoder); + if (errcode < 0) + (errcode); + + (decoder); + + pt__free_decoder(decoder); +~~~ + +First, configure the decoder. As a minimum, the size of the config struct and +the `begin` and `end` of the buffer containing the Intel PT data need to be set. +Configuration options details will be discussed later in this chapter. In the +case of packet encoding, this is the begin and end address of the pre-allocated +buffer, into which Intel PT packets shall be written. + +Next, allocate a decoder object for the layer you are interested in. A return +value of NULL indicates an error. There is no further information available on +the exact error condition. Most of the time, however, the error is the result +of an incomplete or inconsistent configuration. + +Before the decoder can be used, it needs to be synchronized onto the Intel PT +packet stream specified in the configuration. The only exception to this is the +packet encoder, which is implicitly synchronized onto the beginning of the Intel +PT buffer. + +Depending on the type of decoder, one or more synchronization options are +available. + + * `pt__sync_forward()` Synchronize onto the next PSB in forward + direction (or the first PSB if not yet + synchronized). 
+ + * `pt__sync_backward()` Synchronize onto the next PSB in backward + direction (or the last PSB if not yet + synchronized). + + * `pt__sync_set()` Set the synchronization position to a + user-defined location in the Intel PT packet + stream. + There is no check whether the specified + location makes sense or is valid. + + +After synchronizing, the decoder can be used. While decoding, the decoder +stores the location of the last PSB it encountered during normal decode. +Subsequent calls to pt__sync_forward() will start searching from that +location. This is useful for re-synchronizing onto the Intel PT packet stream +in case of errors. An example of a typical decode loop is given below: + +~~~{.c} + for (;;) { + int errcode; + + errcode = (decoder); + if (errcode >= 0) + continue; + + if (errcode == -pte_eos) + return; + + (errcode); + + do { + errcode = pt__sync_forward(decoder); + + if (errcode == -pte_eos) + return; + } while (errcode < 0); + } +~~~ + +You can get the current decoder position as offset into the Intel PT buffer via: + + pt__get_offset() + + +You can get the position of the last synchronization point as offset into the +Intel PT buffer via: + + pt__get_sync_offset() + + +Each layer will be discussed in detail below. In the remainder of this section, +general functionality will be considered. + + +### Version + +You can query the library version using: + + * `pt_library_version()` + + +This function returns a version structure that can be used for compatibility +checks or simply for reporting the version of the decoder library. + + +### Errors + +The library uses a single error enum for all layers. + + * `enum pt_error_code` An enumeration of encode and decode errors. + + +Errors are typically represented as negative pt_error_code enumeration constants +and returned as an int. The library provides two functions for dealing with +errors: + + * `pt_errcode()` Translate an int return value into a pt_error_code + enumeration constant. + + * `pt_errstr()` Returns a human-readable error string. + + +Not all errors may occur on every layer. Every API function specifies the +errors it may return. + + +### Configuration + +Every encoder or decoder allocation function requires a configuration argument. +Some of its fields have already been discussed in the example above. Refer to +the `intel-pt.h` header for detailed and up-to-date documentation of each field. + +As a minimum, the `size` field needs to be set to `sizeof(struct pt_config)` and +`begin` and `end` need to be set to the Intel PT buffer to use. + +The size is used for detecting library version mismatches and to provide +backwards compatibility. Without the proper `size`, decoder allocation will +fail. + +Although not strictly required, it is recommended to also set the `cpu` field to +the processor, on which Intel PT has been collected (for decoders), or for which +Intel PT shall be generated (for encoders). This allows implementing +processor-specific behavior such as erratum workarounds. + + +## The Packet Layer + +This layer deals with Intel PT packet encoding and decoding. It can further be +split into three sub-layers: opcodes, encoding, and decoding. + + +### Opcodes + +The opcodes layer provides enumerations for all the bits necessary for Intel PT +encoding and decoding. The enumeration constants can be used without linking to +the decoder library. There is no encoder or decoder struct associated with this +layer. See the intel-pt.h header file for details. 
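As a rough illustration of what these definitions allow without linking against the library, the sketch below searches a raw trace buffer for the start of a PSB packet using its fixed byte pattern (the 0x02, 0x82 byte pair repeated to a total of 16 bytes). The function name and the use of literal byte values are made up for illustration; real code would use the named constants instead.

~~~{.c}
    #include <stdint.h>
    #include <string.h>

    /* A PSB packet is a 16-byte repetition of the 0x02, 0x82 byte pair. */
    static const uint8_t psb_pattern[16] = {
        0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
        0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82
    };

    /* Return the offset of the first PSB in [begin; end) or -1 if there
     * is none.
     */
    static long find_psb(const uint8_t *begin, const uint8_t *end)
    {
        const uint8_t *pos;

        for (pos = begin; (pos + sizeof(psb_pattern)) <= end; ++pos)
            if (memcmp(pos, psb_pattern, sizeof(psb_pattern)) == 0)
                return (long) (pos - begin);

        return -1;
    }
~~~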
+ + +### Packet Encoding + +The packet encoding layer provides support for encoding Intel PT +packet-by-packet. Start by configuring and allocating a `pt_packet_encoder` as +shown below: + +~~~{.c} + struct pt_encoder *encoder; + struct pt_config config; + int errcode; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + + encoder = pt_alloc_encoder(&config); + if (!encoder) + (errcode); +~~~ + +For packet encoding, only the mandatory config fields need to be filled in. + +The allocated encoder object will be implicitly synchronized onto the beginning +of the Intel PT buffer. You may change the encoder's position at any time by +calling `pt_enc_sync_set()` with the desired buffer offset. + +Next, fill in a `pt_packet` object with details about the packet to be encoded. +You do not need to fill in the `size` field. The needed size is computed by the +encoder. There is no consistency check with the size specified in the packet +object. The following example encodes a TIP packet: + +~~~{.c} + struct pt_packet_encoder *encoder = ...; + struct pt_packet packet; + int errcode; + + packet.type = ppt_tip; + packet.payload.ip.ipc = pt_ipc_update_16; + packet.payload.ip.ip = ; +~~~ + +For IP packets, for example FUP or TIP.PGE, there is no need to mask out bits in +the `ip` field that will not be encoded in the packet due to the specified IP +compression in the `ipc` field. The encoder will ignore them. + +There are no consistency checks whether the specified IP compression in the +`ipc` field is allowed in the current context or whether decode will result in +the full IP specified in the `ip` field. + +Once the packet object has been filled, it can be handed over to the encoder as +shown here: + +~~~{.c} + errcode = pt_enc_next(encoder, &packet); + if (errcode < 0) + (errcode); +~~~ + +The encoder will encode the packet, write it into the Intel PT buffer, and +advance its position to the next byte after the packet. On a successful encode, +it will return the number of bytes that have been written. In case of errors, +nothing will be written and the encoder returns a negative error code. + + +### Packet Decoding + +The packet decoding layer provides support for decoding Intel PT +packet-by-packet. Start by configuring and allocating a `pt_packet_decoder` as +shown here: + +~~~{.c} + struct pt_packet_decoder *decoder; + struct pt_config config; + int errcode; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + config.decode.callback = ; + config.decode.context = ; + + decoder = pt_pkt_alloc_decoder(&config); + if (!decoder) + (errcode); +~~~ + +For packet decoding, an optional decode callback function may be specified in +addition to the mandatory config fields. If specified, the callback function +will be called for packets the decoder does not know about. If there is no +decode callback specified, the decoder will return `-pte_bad_opc`. In addition +to the callback function pointer, an optional pointer to user-defined context +information can be specified. This context will be passed to the decode +callback function. + +Before the decoder can be used, it needs to be synchronized onto the Intel PT +packet stream. Packet decoders offer three synchronization functions. 
To +iterate over synchronization points in the Intel PT packet stream in forward or +backward direction, use one of the following two functions respectively: + + pt_pkt_sync_forward() + pt_pkt_sync_backward() + + +To manually synchronize the decoder at a particular offset into the Intel PT +packet stream, use the following function: + + pt_pkt_sync_set() + + +There are no checks to ensure that the specified offset is at the beginning of a +packet. The example below shows synchronization to the first synchronization +point: + +~~~{.c} + struct pt_packet_decoder *decoder; + int errcode; + + errcode = pt_pkt_sync_forward(decoder); + if (errcode < 0) + (errcode); +~~~ + +The decoder will remember the last synchronization packet it decoded. +Subsequent calls to `pt_pkt_sync_forward` and `pt_pkt_sync_backward` will use +this as their starting point. + +You can get the current decoder position as offset into the Intel PT buffer via: + + pt_pkt_get_offset() + + +You can get the position of the last synchronization point as offset into the +Intel PT buffer via: + + pt_pkt_get_sync_offset() + + +Once the decoder is synchronized, you can iterate over packets by repeated calls +to `pt_pkt_next()` as shown in the following example: + +~~~{.c} + struct pt_packet_decoder *decoder; + int errcode; + + for (;;) { + struct pt_packet packet; + + errcode = pt_pkt_next(decoder, &packet, sizeof(packet)); + if (errcode < 0) + break; + + (&packet); + } +~~~ + + +## The Event Layer + +The event layer deals with packet combinations that encode higher-level events. +It is used for reconstructing execution flow for users who need finer-grain +control not available via the instruction flow layer or for users who want to +integrate execution flow reconstruction with other functionality more tightly +than it would be possible otherwise. + +This section describes how to use the query decoder for reconstructing execution +flow. See the instruction flow decoder as an example. Start by configuring and +allocating a `pt_query_decoder` as shown below: + +~~~{.c} + struct pt_query_decoder *decoder; + struct pt_config config; + int errcode; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + config.decode.callback = ; + config.decode.context = ; + + decoder = pt_qry_alloc_decoder(&config); + if (!decoder) + (errcode); +~~~ + +An optional packet decode callback function may be specified in addition to the +mandatory config fields. If specified, the callback function will be called for +packets the decoder does not know about. The query decoder will ignore the +unknown packet except for its size in order to skip it. If there is no decode +callback specified, the decoder will abort with `-pte_bad_opc`. In addition to +the callback function pointer, an optional pointer to user-defined context +information can be specified. This context will be passed to the decode +callback function. + +Before the decoder can be used, it needs to be synchronized onto the Intel PT +packet stream. To iterate over synchronization points in the Intel PT packet +stream in forward or backward direction, the query decoders offer the following +two synchronization functions respectively: + + pt_qry_sync_forward() + pt_qry_sync_backward() + + +To manually synchronize the decoder at a synchronization point (i.e. 
PSB packet) +in the Intel PT packet stream, use the following function: + + pt_qry_sync_set() + + +After successfully synchronizing, the query decoder will start reading the PSB+ +header to initialize its internal state. If tracing is enabled at this +synchronization point, the IP of the instruction, at which decoding should be +started, is returned. If tracing is disabled at this synchronization point, it +will be indicated in the returned status bits (see below). In this example, +synchronization to the first synchronization point is shown: + +~~~{.c} + struct pt_query_decoder *decoder; + uint64_t ip; + int status; + + status = pt_qry_sync_forward(decoder, &ip); + if (status < 0) + (status); +~~~ + +In addition to a query decoder, you will need an instruction decoder for +decoding and classifying instructions. + + +#### In A Nutshell + +After synchronizing, you begin decoding instructions starting at the returned +IP. As long as you can determine the next instruction in execution order, you +continue on your own. Only when the next instruction cannot be determined by +examining the current instruction, you would ask the query decoder for guidance: + + * If the current instruction is a conditional branch, the + `pt_qry_cond_branch()` function will tell whether it was taken. + + * If the current instruction is an indirect branch, the + `pt_qry_indirect_branch()` function will provide the IP of its destination. + + +~~~{.c} + struct pt_query_decoder *decoder; + uint64_t ip; + + for (;;) { + struct insn; + + insn = (ip); + + ip += (insn); + + if ((insn)) { + int status, taken; + + status = pt_qry_cond_branch(decoder, &taken); + if (status < 0) + (status); + + if (taken) + ip += (insn); + } else if ((insn)) { + int status; + + status = pt_qry_indirect_branch(decoder, &ip); + if (status < 0) + (status); + } + } +~~~ + + +Certain aspects such as, for example, asynchronous events or synchronizing at a +location where tracing is disabled, have been ignored so far. Let us consider +them now. + + +#### Queries + +The query decoder provides four query functions: + + * `pt_qry_cond_branch()` Query whether the next conditional branch was + taken. + + * `pt_qry_indirect_branch()` Query for the destination IP of the next + indirect branch. + + * `pt_qry_event()` Query for the next event. + + * `pt_qry_time()` Query for the current time. + + +Each function returns either a positive vector of status bits or a negative +error code. For details on status bits and error conditions, please refer to +the `pt_status_flag` and `pt_error_code` enumerations in the intel-pt.h header. + +The `pts_ip_suppressed` status bit is used to indicate that no IP is available +at functions that are supposed to return an IP. Examples are the indirect +branch query function and both synchronization functions. + +The `pts_event_pending` status bit is used to indicate that there is an event +pending. You should query for this event before continuing execution flow +reconstruction. + +The `pts_eos` status bit is used to indicate the end of the trace. Any +subsequent query will return -pte_eos. + + +#### Events + +Events are signaled ahead of time. When you query for pending events as soon as +they are indicated, you will be aware of asynchronous events before you reach +the instruction associated with the event. + +For example, if tracing is disabled at the synchronization point, the IP will be +suppressed. In this case, it is very likely that a tracing enabled event is +signaled. 
You will also get events for initializing the decoder state after +synchronizing onto the Intel PT packet stream. For example, paging or execution +mode events. + +See the `enum pt_event_type` and `struct pt_event` in the intel-pt.h header for +details on possible events. This document does not give an example of event +processing. Refer to the implementation of the instruction flow decoder in +pt_insn.c for details. + + +#### Timing + +To be able to signal events, the decoder reads ahead until it arrives at a query +relevant packet. Errors encountered during that time will be postponed until +the respective query call. This reading ahead affects timing. The decoder will +always be a few packets ahead. When querying for the current time, the query +will return the time at the decoder's current packet. This corresponds to the +time at our next query. + + +#### Return Compression + +If Intel PT has been configured to compress returns, a successfully compressed +return is represented as a conditional branch instead of an indirect branch. +For a RET instruction, you first query for a conditional branch. If the query +succeeds, it should indicate that the branch was taken. In that case, the +return has been compressed. A not taken branch indicates an error. If the +query fails, the return has not been compressed and you query for an indirect +branch. + +There is no guarantee that returns will be compressed. Even though return +compression has been enabled, returns may still be represented as indirect +branches. + +To reconstruct the execution flow for compressed returns, you would maintain a +stack of return addresses. For each call instruction, push the IP of the +instruction following the call onto the stack. For compressed returns, pop the +topmost IP from the stack. See pt_retstack.h and pt_retstack.c for a sample +implementation. + + +## The Instruction Flow Layer + +The instruction flow layer provides a simple API for iterating over instructions +in execution order. Start by configuring and allocating a `pt_insn_decoder` as +shown below: + +~~~{.c} + struct pt_insn_decoder *decoder; + struct pt_config config; + int errcode; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + config.decode.callback = ; + config.decode.context = ; + + decoder = pt_insn_alloc_decoder(&config); + if (!decoder) + (errcode); +~~~ + +An optional packet decode callback function may be specified in addition to the +mandatory config fields. If specified, the callback function will be called for +packets the decoder does not know about. The decoder will ignore the unknown +packet except for its size in order to skip it. If there is no decode callback +specified, the decoder will abort with `-pte_bad_opc`. In addition to the +callback function pointer, an optional pointer to user-defined context +information can be specified. This context will be passed to the decode +callback function. + +The image argument is optional. If no image is given, the decoder will use an +empty default image that can be populated later on and that is implicitly +destroyed when the decoder is freed. See below for more information on this. + + +#### The Traced Image + +In addition to the Intel PT configuration, the instruction flow decoder needs to +know the memory image for which Intel PT has been recorded. This memory image +is represented by a `pt_image` object. 
If decoding failed due to an IP lying +outside of the traced memory image, `pt_insn_next()` will return `-pte_nomap`. + +Use `pt_image_alloc()` to allocate and `pt_image_free()` to free an image. +Images may not be shared. Every decoder must use a different image. Use this +to prepare the image in advance or if you want to switch between images. + +Every decoder provides an empty default image that is used if no image is +specified during allocation. The default image is implicitly destroyed when the +decoder is freed. It can be obtained by calling `pt_insn_get_image()`. Use +this if you only use one decoder and one image. + +An image is a collection of contiguous, non-overlapping memory regions called +`sections`. Starting with an empty image, it may be populated with repeated +calls to `pt_image_add_file()` or `pt_image_add_cached()`, one for each section, +or with a call to `pt_image_copy()` to add all sections from another image. If +a newly added section overlaps with an existing section, the existing section +will be truncated or split to make room for the new section. + +In some cases, the memory image may change during the execution. You can use +the `pt_image_remove_by_filename()` function to remove previously added sections +by their file name and `pt_image_remove_by_asid()` to remove all sections for an +address-space. + +In addition to adding sections, you can register a callback function for reading +memory using `pt_image_set_callback()`. The `context` parameter you pass +together with the callback function pointer will be passed to your callback +function every time it is called. There can only be one callback at any time. +Adding a new callback will remove any previously added callback. To remove the +callback function, pass `NULL` to `pt_image_set_callback()`. + +Callback and files may be combined. The callback function is used whenever +the memory cannot be found in any of the image's sections. + +If more than one process is traced, the memory image may change when the process +context is switched. To simplify handling this case, an address-space +identifier may be passed to each of the above functions to define separate +images for different processes at the same time. The decoder will select the +correct image based on context switch information in the Intel PT trace. If +you want to manage this on your own, you can use `pt_insn_set_image()` to +replace the image a decoder uses. + + +#### The Traced Image Section Cache + +When using multiple decoders that work on related memory images it is desirable +to share image sections between decoders. The underlying file sections will be +mapped only once per image section cache. + +Use `pt_iscache_alloc()` to allocate and `pt_iscache_free()` to free an image +section cache. Freeing the cache does not destroy sections added to the cache. +They remain valid until they are no longer used. + +Use `pt_iscache_add_file()` to add a file section to an image section cache. +The function returns an image section identifier (ISID) that uniquely identifies +the section in this cache. Use `pt_image_add_cached()` to add a file section +from an image section cache to an image. + +Multiple image section caches may be used at the same time but it is recommended +not to mix sections from different image section caches in one image. + +A traced image section cache can also be used for reading an instruction's +memory via its IP and ISID as provided in `struct pt_insn`. 
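The following sketch shows how these calls might be combined. The file name, file offset, section size, and load address are made up for illustration, and `<handle error>` stands for whatever error handling the application uses.

~~~{.c}
    struct pt_image_section_cache *iscache;
    struct pt_image *image;
    int isid, errcode;

    iscache = pt_iscache_alloc("iscache");
    image = pt_image_alloc("image");
    if (!iscache || !image)
        <handle error>;

    /* Map [0x0; 0x3000) of the binary to virtual address 0x400000 and
     * remember the returned section identifier.
     */
    isid = pt_iscache_add_file(iscache, "/path/to/binary", 0x0, 0x3000,
                               0x400000);
    if (isid < 0)
        <handle error>(isid);

    /* Add the cached section to the image without restricting it to a
     * particular address space.
     */
    errcode = pt_image_add_cached(image, iscache, isid, NULL);
    if (errcode < 0)
        <handle error>(errcode);
~~~

The resulting image can then be attached to a decoder, for example via `pt_insn_set_image()` as described above, and the same cached section can be added to other images used by other decoders.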
+ + +#### Synchronizing + +Before the decoder can be used, it needs to be synchronized onto the Intel PT +packet stream. To iterate over synchronization points in the Intel PT packet +stream in forward or backward directions, the instruction flow decoders offer +the following two synchronization functions respectively: + + pt_insn_sync_forward() + pt_insn_sync_backward() + + +To manually synchronize the decoder at a synchronization point (i.e. PSB packet) +in the Intel PT packet stream, use the following function: + + pt_insn_sync_set() + + +The example below shows synchronization to the first synchronization point: + +~~~{.c} + struct pt_insn_decoder *decoder; + int errcode; + + errcode = pt_insn_sync_forward(decoder); + if (errcode < 0) + (errcode); +~~~ + +The decoder will remember the last synchronization packet it decoded. +Subsequent calls to `pt_insn_sync_forward` and `pt_insn_sync_backward` will use +this as their starting point. + +You can get the current decoder position as offset into the Intel PT buffer via: + + pt_insn_get_offset() + + +You can get the position of the last synchronization point as offset into the +Intel PT buffer via: + + pt_insn_get_sync_offset() + + +#### Iterating + +Once the decoder is synchronized, you can iterate over instructions in execution +flow order by repeated calls to `pt_insn_next()` as shown in the following +example: + +~~~{.c} + struct pt_insn_decoder *decoder; + int errcode; + + for (;;) { + struct pt_insn insn; + + errcode = pt_insn_next(decoder, &insn, sizeof(insn)); + + if (insn.iclass != ptic_error) + (&insn); + + if (errcode < 0) + break; + } +~~~ + +For each instruction, you get its IP, its size in bytes, the raw memory, an +identifier for the image section that contained it, the current execution mode, +and the speculation state, that is whether the instruction has been executed +speculatively. In addition, you get a coarse classification that can be used +for further processing without the need for a full instruction decode. + +If a traced image section cache is used the image section identifier can be used +to trace an instruction back to the binary file that contained it. This allows +mapping the instruction back to source code using the debug information +contained in or reachable via the binary file. + +You also get some information about events that occured either before or after +executing the instruction like enable or disable tracing. For detailed +information about instructions, see `enum pt_insn_class` and `struct pt_insn` in +the intel-pt.h header file. + +Beware that `pt_insn_next()` may indicate errors that occur after the returned +instruction. The returned instruction is valid if its `iclass` field is set. + + +## The Block Layer + +The block layer provides a simple API for iterating over blocks of sequential +instructions in execution order. The instructions in a block are sequential in +the sense that no trace is required for reconstructing the instructions. The IP +of the first instruction is given in `struct pt_block` and the IP of other +instructions in the block can be determined by decoding and examining the +previous instruction. 
+ +Start by configuring and allocating a `pt_block_decoder` as shown below: + +~~~{.c} + struct pt_block_decoder *decoder; + struct pt_config config; + + memset(&config, 0, sizeof(config)); + config.size = sizeof(config); + config.begin = ; + config.end = ; + config.cpu = ; + config.decode.callback = ; + config.decode.context = ; + + decoder = pt_blk_alloc_decoder(&config); +~~~ + +An optional packet decode callback function may be specified in addition to the +mandatory config fields. If specified, the callback function will be called for +packets the decoder does not know about. The decoder will ignore the unknown +packet except for its size in order to skip it. If there is no decode callback +specified, the decoder will abort with `-pte_bad_opc`. In addition to the +callback function pointer, an optional pointer to user-defined context +information can be specified. This context will be passed to the decode +callback function. + + +#### Synchronizing + +Before the decoder can be used, it needs to be synchronized onto the Intel PT +packet stream. To iterate over synchronization points in the Intel PT packet +stream in forward or backward directions, the block decoder offers the following +two synchronization functions respectively: + + pt_blk_sync_forward() + pt_blk_sync_backward() + + +To manually synchronize the decoder at a synchronization point (i.e. PSB packet) +in the Intel PT packet stream, use the following function: + + pt_blk_sync_set() + + +The example below shows synchronization to the first synchronization point: + +~~~{.c} + struct pt_block_decoder *decoder; + int errcode; + + errcode = pt_blk_sync_forward(decoder); + if (errcode < 0) + (errcode); +~~~ + +The decoder will remember the last synchronization packet it decoded. +Subsequent calls to `pt_blk_sync_forward` and `pt_blk_sync_backward` will use +this as their starting point. + +You can get the current decoder position as offset into the Intel PT buffer via: + + pt_blk_get_offset() + + +You can get the position of the last synchronization point as offset into the +Intel PT buffer via: + + pt_blk_get_sync_offset() + + +#### Iterating + +Once the decoder is synchronized, it can be used to iterate over blocks of +instructions in execution flow order by repeated calls to `pt_blk_next()` as +shown in the following example: + +~~~{.c} + struct pt_block_decoder *decoder; + int errcode; + + for (;;) { + struct pt_block block; + + errcode = pt_blk_next(decoder, &block, sizeof(block)); + + if (block.ninsn > 0) + (&block); + + if (errcode < 0) + break; + } +~~~ + +A block contains enough information to reconstruct the instructions. See +`struct pt_block` in `intel-pt.h` for details. Note that errors returned by +`pt_blk_next()` apply after the last instruction in the provided block. + +It is recommended to use a traced image section cache so the image section +identifier contained in a block can be used for reading the memory containing +the instructions in the block. This also allows mapping the instructions back +to source code using the debug information contained in or reachable via the +binary file. + +In some cases, the last instruction in a block may cross image section +boundaries. This can happen when a code segment is split into more than one +image section. The block is marked truncated in this case and provides the raw +bytes of the last instruction. 
+
+The following example shows how instructions can be reconstructed from a block:
+
+~~~{.c}
+	struct pt_image_section_cache *iscache;
+	struct pt_block *block;
+	uint16_t ninsn;
+	uint64_t ip;
+
+	ip = block->ip;
+	for (ninsn = 0; ninsn < block->ninsn; ++ninsn) {
+		uint8_t raw[pt_max_insn_size];
+		<your insn type> insn;
+		int size, errcode;
+
+		if (block->truncated && ((ninsn + 1) == block->ninsn)) {
+			memcpy(raw, block->raw, block->size);
+			size = block->size;
+		} else {
+			size = pt_iscache_read(iscache, raw, sizeof(raw), block->isid, ip);
+			if (size < 0)
+				break;
+		}
+
+		errcode = <decode instruction>(&insn, raw, size, block->mode);
+		if (errcode < 0)
+			break;
+
+		<process instruction>(&insn);
+
+		ip = <determine next ip>(&insn);
+	}
+~~~
+
+
+## Parallel Decode
+
+Intel PT splits naturally into self-contained PSB segments that can be decoded
+independently.  Use the packet or query decoder to search for PSBs using
+repeated calls to `pt_pkt_sync_forward()` and `pt_pkt_get_sync_offset()` (or
+`pt_qry_sync_forward()` and `pt_qry_get_sync_offset()`).  The following example
+shows this using the query decoder, which will already give the IP needed in
+the next step.
+
+~~~{.c}
+	struct pt_query_decoder *decoder;
+	uint64_t offset, ip;
+	int status, errcode;
+
+	for (;;) {
+		status = pt_qry_sync_forward(decoder, &ip);
+		if (status < 0)
+			break;
+
+		errcode = pt_qry_get_sync_offset(decoder, &offset);
+		if (errcode < 0)
+			<handle error>(errcode);
+
+		<process trace segment>(offset, ip, status);
+	}
+~~~
+
+The individual trace segments can then be decoded using the query, instruction
+flow, or block decoder as shown in the previous examples.
+
+When stitching decoded trace segments together, a sequence of linear (in the
+sense that it can be decoded without Intel PT) code has to be filled in.  Use
+the `pts_eos` status indication to stop decoding early enough.  Then proceed
+until the IP at the start of the succeeding trace segment is reached.  When
+using the instruction flow decoder, `pt_insn_next()` may be used for that as
+shown in the following example:
+
+~~~{.c}
+	struct pt_insn_decoder *decoder;
+	struct pt_insn insn;
+	int status;
+
+	for (;;) {
+		status = pt_insn_next(decoder, &insn, sizeof(insn));
+		if (status < 0)
+			<handle error>(status);
+
+		if (status & pts_eos)
+			break;
+
+		<process instruction>(&insn);
+	}
+
+	while (insn.ip != <next segment's start IP>) {
+		<process instruction>(&insn);
+
+		status = pt_insn_next(decoder, &insn, sizeof(insn));
+		if (status < 0)
+			<handle error>(status);
+	}
+~~~
+
+
+## Threading
+
+The decoder library API is not thread-safe.  Different threads may allocate and
+use different decoder objects at the same time.  Different decoders must not use
+the same image object.  Use `pt_image_copy()` to give each decoder its own copy
+of a shared master image.
diff --git a/doc/howto_pttc.md b/doc/howto_pttc.md
new file mode 100755
index 0000000..9e46119
--- /dev/null
+++ b/doc/howto_pttc.md
@@ -0,0 +1,246 @@
+Testing the Intel(R) Processor Trace (Intel PT) Decoder Library and Samples {#pttc}
+===========================================================================
+
+
+
+This chapter documents how to use the pttc tool to generate and run tests.
+Pttc takes a yasm assembly file and creates a Processor Trace stream from
+special directives in its input.
+
+
+Usage
+-----
+
+    $ pttc path/to/file.ptt
+
+If no error occurs, the following files will be generated in the current working
+directory:
+
+    file.lst
+    file.bin
+    file.pt
+    file.exp
+
+The `.lst` and `.bin` files are generated by a call to yasm.  The `.pt` file
+contains the Processor Trace and the `.exp` file contains the content of the
+comments after the `.exp` directive.
+
+Pttc prints the filenames of the generated `.exp` files to stdout.
+
+
+Syntax
+------
+
+Pttc allows annotations in the comments of yasm assembler source files.  The
+parser recognizes all comments that contain the `@pt` directive marker.
+
+Every pt directive can be preceded by a label name followed by a colon (`:`).
+Refer to the description of the `.exp()` directive below on how to use these
+labels.
+
+The general syntax for pt directives is as follows:
+
+    @pt [label:]directive([arguments])
+
+
+### Directives
+
+This section lists the directives that are understood by pttc.
+
+
+#### psb, psbend, pad, ovf, stop
+
+    @pt psb()
+    @pt psbend()
+    @pt pad()
+    @pt ovf()
+    @pt stop()
+
+These packets do not have any arguments and correspond to the packets from the
+specification.
+
+
+#### tnt, tnt64
+
+    @pt tnt(args)
+    @pt tnt64(args)
+
+The argument of the tnt and tnt64 packets is a list of Takens `t` and
+Not-Takens `n`.  For better readability, an arbitrary number of blanks and dots
+can be interspersed.
+
+It is an error if the payload is empty, contains only blanks or dots, or
+contains any other characters.  It is also an error to have more than 6 t's or
+n's in the payload of a tnt packet, or more than 47 in the payload of a tnt64
+packet.
+
+
+#### tip, tip.pge, tip.pgd, fup
+
+    @pt tip(ipc: addr)
+    @pt tip.pge(ipc: addr)
+    @pt tip.pgd(ipc: addr)
+    @pt fup(ipc: addr)
+
+These packets accept arbitrary addresses.  `Addr` must be a parsable integer or
+a valid label name.  `Ipc` specifies the IP compression bits as an integer
+number.
+
+If `addr` is given as a label, the address is truncated according to the IP
+bytes value given in `ipc`.  Otherwise the address needs to be a zero-extended
+integer no bigger than specified in `ipc`.
+
+
+#### mode.exec, mode.tsx
+
+    @pt mode.exec(mode)
+    @pt mode.tsx(state)
+
+`Mode` must be `16bit`, `32bit`, or `64bit`; `state` must be `begin`, `abort`,
+or `commit`.
+
+
+#### pip
+
+    @pt pip(addr[, nr])
+
+Addr is the value that was written to CR3.
+
+If nr is specified after addr, the non-root bit is set.
+
+
+#### tsc
+
+    @pt tsc(value)
+
+Value is the timestamp.
+
+
+#### cbr
+
+    @pt cbr(value)
+
+Value is the core/bus ratio.
+
+
+#### tma
+
+    @pt tma(ctc, fc)
+
+Ctc is the 16bit crystal clock component.
+Fc is the 9bit fast counter component.
+
+
+#### mtc
+
+    @pt mtc(value)
+
+Value is the 8bit crystal clock component.
+
+
+#### cyc
+
+    @pt cyc(value)
+
+Value is the cycle count.
+
+
+#### vmcs
+
+    @pt vmcs(value)
+
+Value is the VMCS base address.  Beware that only bits 12 to 51 will be used.
+The rest will be silently ignored.
+
+
+#### mnt
+
+    @pt mnt(value)
+
+Value is the 8-byte packet payload represented as a 64-bit little-endian
+number.
+
+
+#### .exp
+
+    @pt .exp([extra])
+
+Every occurrence of this directive prints all the lines following it to a file
+named `file[-extra].exp`.
+
+The first occurrence of this directive stops processing of other directives.
+
+In order to have a valid yasm file, it is necessary to put the expected output
+into yasm comments (with the semi-colon character (`;`)).  Any character up to
+(and including) the semi-colon is not printed to the `.exp` file.  Trailing
+white space is removed from each line.
+
+Comments are made with the `#` character and go to the end of line.  Comments
+and whitespace before comments are not printed in the `.exp` file.
+
+Each line that contains no yasm comment at all is not printed to the `.exp`
+file.  Empty lines can be used to structure the expected output text.
+ +In `.exp` files, the address of a yasm label can be substituted using: + + %[?0]label[.]. + + +Labels are prefixed with `%`, for example, `%%label`. A label name can consist +of alphanumeric characters and underscores. Labels must be unique. The address +of label will be substituted with a hex number including leading `0x`. + +Prefixing the label with `0`, for example `%0label`, prints the address with +leading zeroes using 16 hex digits plus the leading `0x`. + +The least significant `n` bytes of an address can be masked by appending `.n` to +the label. For example, `%%label.2` with `label` = `0xffffff004c` is printed as +`0x4c`. + +Prefixing the label with `?` in combination with masking replaces the masked out +parts with `?` using 16 digits for the address plus the leading `0x`. The +remaining number is zero extended. For example, `%?label.2` with `label` = +`0xc0001` is printed as `0x????????????0001`. + +The packet number of pt directives can also be substituted in the output. These +numbers are printed in decimal. The syntax is as follows: + + %label + + +### Special Labels + +There is a special label for the byte offset after the last packet: `%%eos`. + + +Labels in sections are relative to the section's vstart address. PTTC also adds +the following special section labels: + + * *section__start* gives the section's offset in the binary file + * *section__vstart* gives the virtual base address of the mapped section + * *section__length* gives the size of the section in bytes + +Beware that PTTC does not support switching back and forth between sections. diff --git a/doc/man/CMakeLists.txt b/doc/man/CMakeLists.txt new file mode 100644 index 0000000..16dea0a --- /dev/null +++ b/doc/man/CMakeLists.txt @@ -0,0 +1,144 @@ +# Copyright (c) 2015-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +file(MAKE_DIRECTORY ${MAN_OUTPUT_DIRECTORY}/man3) + +find_program(PANDOC pandoc + DOC "Path to pandoc; used for building man pages." 
+) + +function(add_man_page filename section function) + set(input ${CMAKE_CURRENT_SOURCE_DIR}/${filename}) + set(output ${MAN_OUTPUT_DIRECTORY}/man${section}/${function}.${section}) + + add_custom_command( + OUTPUT ${output} + COMMAND ${PANDOC} -s -f markdown -t man -o ${output} ${input} + MAIN_DEPENDENCY ${filename} + ) +endfunction(add_man_page) + +function(install_man_page section function) + install( + FILES ${MAN_OUTPUT_DIRECTORY}/man${section}/${function}.${section} + DESTINATION ${CMAKE_INSTALL_MANDIR}/man${section} + ) +endfunction(install_man_page) + +function(add_man_page_alias section function alias) + set(output ${MAN_OUTPUT_DIRECTORY}/man${section}/${alias}.${section}) + + file(WRITE ${output} ".so man${section}/${function}.${section}\n") + + install_man_page(${section} ${alias}) +endfunction(add_man_page_alias) + +set(MAN3_FUNCTIONS + pt_library_version + pt_config + pt_packet + pt_alloc_encoder + pt_enc_get_offset + pt_enc_get_config + pt_pkt_alloc_decoder + pt_pkt_sync_forward + pt_pkt_get_offset + pt_qry_alloc_decoder + pt_qry_sync_forward + pt_qry_get_offset + pt_qry_cond_branch + pt_qry_event + pt_qry_time + pt_image_alloc + pt_image_add_file + pt_image_remove_by_filename + pt_image_set_callback + pt_insn_alloc_decoder + pt_insn_sync_forward + pt_insn_get_offset + pt_insn_get_image + pt_insn_next + pt_iscache_alloc + pt_iscache_add_file + pt_iscache_read + pt_blk_alloc_decoder + pt_blk_sync_forward + pt_blk_get_offset + pt_blk_next +) + +foreach (function ${MAN3_FUNCTIONS}) + set(MAN_PAGES ${MAN_PAGES} ${MAN_OUTPUT_DIRECTORY}/man3/${function}.3) + + add_man_page(${function}.3.md 3 ${function}) + install_man_page(3 ${function}) +endforeach () + +add_man_page_alias(3 pt_config pt_cpu_errata) +add_man_page_alias(3 pt_packet pt_enc_next) +add_man_page_alias(3 pt_packet pt_pkt_next) +add_man_page_alias(3 pt_alloc_encoder pt_free_encoder) +add_man_page_alias(3 pt_enc_get_offset pt_enc_sync_set) +add_man_page_alias(3 pt_enc_get_config pt_pkt_get_config) +add_man_page_alias(3 pt_enc_get_config pt_qry_get_config) +add_man_page_alias(3 pt_enc_get_config pt_insn_get_config) +add_man_page_alias(3 pt_enc_get_config pt_blk_get_config) +add_man_page_alias(3 pt_pkt_alloc_decoder pt_pkt_free_decoder) +add_man_page_alias(3 pt_pkt_sync_forward pt_pkt_sync_backward) +add_man_page_alias(3 pt_pkt_sync_forward pt_pkt_sync_set) +add_man_page_alias(3 pt_pkt_get_offset pt_pkt_get_sync_offset) +add_man_page_alias(3 pt_qry_alloc_decoder pt_qry_free_decoder) +add_man_page_alias(3 pt_qry_sync_forward pt_qry_sync_backward) +add_man_page_alias(3 pt_qry_sync_forward pt_qry_sync_set) +add_man_page_alias(3 pt_qry_get_offset pt_qry_get_sync_offset) +add_man_page_alias(3 pt_qry_cond_branch pt_qry_indirect_branch) +add_man_page_alias(3 pt_qry_time pt_qry_core_bus_ratio) +add_man_page_alias(3 pt_qry_time pt_insn_time) +add_man_page_alias(3 pt_qry_time pt_insn_core_bus_ratio) +add_man_page_alias(3 pt_qry_time pt_blk_time) +add_man_page_alias(3 pt_qry_time pt_blk_core_bus_ratio) +add_man_page_alias(3 pt_image_alloc pt_image_free) +add_man_page_alias(3 pt_image_alloc pt_image_name) +add_man_page_alias(3 pt_image_add_file pt_image_copy) +add_man_page_alias(3 pt_image_add_file pt_image_add_cached) +add_man_page_alias(3 pt_image_remove_by_filename pt_image_remove_by_asid) +add_man_page_alias(3 pt_insn_alloc_decoder pt_insn_free_decoder) +add_man_page_alias(3 pt_insn_sync_forward pt_insn_sync_backward) +add_man_page_alias(3 pt_insn_sync_forward pt_insn_sync_set) +add_man_page_alias(3 pt_insn_get_offset 
pt_insn_get_sync_offset)
+add_man_page_alias(3 pt_insn_get_image pt_insn_set_image)
+add_man_page_alias(3 pt_insn_get_image pt_blk_get_image)
+add_man_page_alias(3 pt_insn_get_image pt_blk_set_image)
+add_man_page_alias(3 pt_insn_next pt_insn)
+add_man_page_alias(3 pt_iscache_alloc pt_iscache_free)
+add_man_page_alias(3 pt_iscache_alloc pt_iscache_name)
+add_man_page_alias(3 pt_blk_alloc_decoder pt_blk_free_decoder)
+add_man_page_alias(3 pt_blk_sync_forward pt_blk_sync_backward)
+add_man_page_alias(3 pt_blk_sync_forward pt_blk_sync_set)
+add_man_page_alias(3 pt_blk_get_offset pt_blk_get_sync_offset)
+add_man_page_alias(3 pt_blk_next pt_block)
+
+add_custom_target(man ALL DEPENDS ${MAN_PAGES})
diff --git a/doc/man/pt_alloc_encoder.3.md b/doc/man/pt_alloc_encoder.3.md
new file mode 100644
index 0000000..2c4e428
--- /dev/null
+++ b/doc/man/pt_alloc_encoder.3.md
@@ -0,0 +1,96 @@
+% PT_ALLOC_ENCODER(3)
+
+
+
+# NAME
+
+pt_alloc_encoder, pt_free_encoder - allocate/free an Intel(R) Processor Trace
+packet encoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_packet_encoder \***
+| **pt_alloc_encoder(const struct pt_config \**config*);**
+|
+| **void pt_free_encoder(struct pt_packet_encoder \**encoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_alloc_encoder**() allocates a new Intel Processor Trace (Intel PT) packet
+encoder and returns a pointer to it.  The packet encoder generates Intel PT
+trace from *pt_packet* objects.  See **pt_enc_next**(3).
+
+The *config* argument points to a *pt_config* object.  See **pt_config**(3).
+The *config* argument will not be referenced by the returned encoder but the
+trace buffer defined by the *config* argument's *begin* and *end* fields will.
+
+The returned packet encoder is initially synchronized onto the beginning of the
+trace buffer specified in its *config* argument.  Use **pt_enc_sync_set**(3) to
+move it to any other position inside the trace buffer.
+
+**pt_free_encoder**() frees the Intel PT packet encoder pointed to by
+*encoder*.  The *encoder* argument must be NULL or point to an encoder that has
+been allocated by a call to **pt_alloc_encoder**().
+
+
+# RETURN VALUE
+
+**pt_alloc_encoder**() returns a pointer to a *pt_packet_encoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_packet_encoder *encoder;
+	int errcode;
+
+	encoder = pt_alloc_encoder(config);
+	if (!encoder)
+		return pte_nomem;
+
+	errcode = bar(encoder);
+
+	pt_free_encoder(encoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_enc_sync_set**(3), **pt_enc_get_offset**(3),
+**pt_enc_get_config**(3), **pt_enc_next**(3)
diff --git a/doc/man/pt_blk_alloc_decoder.3.md b/doc/man/pt_blk_alloc_decoder.3.md
new file mode 100644
index 0000000..7dbe7eb
--- /dev/null
+++ b/doc/man/pt_blk_alloc_decoder.3.md
@@ -0,0 +1,98 @@
+% PT_BLK_ALLOC_DECODER(3)
+
+
+
+# NAME
+
+pt_blk_alloc_decoder, pt_blk_free_decoder - allocate/free an Intel(R) Processor
+Trace block decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_block_decoder \***
+| **pt_blk_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_blk_free_decoder(struct pt_block_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+A block decoder decodes raw Intel Processor Trace (Intel PT) into a sequence of
+blocks of instructions described by the *pt_block* structure.  See
+**pt_blk_next**(3).
+
+**pt_blk_alloc_decoder**() allocates a new block decoder and returns a pointer
+to it.
The *config* argument points to a *pt_config* object.  See
+**pt_config**(3).  The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned block decoder needs to be synchronized onto the trace stream before
+it can be used.  To synchronize the decoder, use **pt_blk_sync_forward**(3),
+**pt_blk_sync_backward**(3), or **pt_blk_sync_set**(3).
+
+**pt_blk_free_decoder**() frees the Intel PT block decoder pointed to by
+*decoder*.  The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_blk_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_blk_alloc_decoder**() returns a pointer to a *pt_block_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_block_decoder *decoder;
+	int errcode;
+
+	decoder = pt_blk_alloc_decoder(config);
+	if (!decoder)
+		return pte_nomem;
+
+	errcode = decode(decoder);
+
+	pt_blk_free_decoder(decoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3),
+**pt_blk_sync_set**(3), **pt_blk_get_offset**(3), **pt_blk_get_sync_offset**(3),
+**pt_blk_get_image**(3), **pt_blk_set_image**(3), **pt_blk_get_config**(3),
+**pt_blk_time**(3), **pt_blk_core_bus_ratio**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_blk_get_offset.3.md b/doc/man/pt_blk_get_offset.3.md
new file mode 100644
index 0000000..25daf6e
--- /dev/null
+++ b/doc/man/pt_blk_get_offset.3.md
@@ -0,0 +1,82 @@
+% PT_BLK_GET_OFFSET(3)
+
+
+
+# NAME
+
+pt_blk_get_offset, pt_blk_get_sync_offset - get an Intel(R) Processor Trace
+block decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_blk_get_offset(struct pt_block_decoder \**decoder*,**
+|                         **uint64_t \**offset*);**
+| **int pt_blk_get_sync_offset(struct pt_block_decoder \**decoder*,**
+|                              **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_blk_get_offset**() provides *decoder*'s current position as offset in
+bytes from the beginning of *decoder*'s trace buffer in the unsigned integer
+variable pointed to by *offset*.
+
+**pt_blk_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+:   *decoder* has not been synchronized onto the trace stream.  Use
+    **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3), or
+    **pt_blk_sync_set**(3) to synchronize *decoder*.
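+
+
+# EXAMPLE
+
+The following sketch illustrates both functions; the function name *foo* and
+the use of the offset difference as a rough progress indicator are illustrative
+only:
+
+~~~{.c}
+int foo(struct pt_block_decoder *decoder, uint64_t *progress) {
+	uint64_t offset, sync;
+	int errcode;
+
+	errcode = pt_blk_get_offset(decoder, &offset);
+	if (errcode < 0)
+		return errcode;
+
+	errcode = pt_blk_get_sync_offset(decoder, &sync);
+	if (errcode < 0)
+		return errcode;
+
+	/* Bytes decoded since the last synchronization point. */
+	*progress = offset - sync;
+	return 0;
+}
+~~~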
+
+
+# SEE ALSO
+
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3),
+**pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3),
+**pt_blk_sync_set**(3), **pt_blk_get_config**(3), **pt_blk_time**(3),
+**pt_blk_core_bus_ratio**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_blk_next.3.md b/doc/man/pt_blk_next.3.md
new file mode 100644
index 0000000..7aea244
--- /dev/null
+++ b/doc/man/pt_blk_next.3.md
@@ -0,0 +1,362 @@
+% PT_BLK_NEXT(3)
+
+
+
+# NAME
+
+pt_blk_next, pt_block - iterate over blocks of traced instructions
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_block;**
+|
+| **int pt_blk_next(struct pt_block_decoder \**decoder*,**
+|                   **struct pt_block \**block*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_blk_next**() provides the next block of instructions in execution order,
+which is described by the *pt_block* structure.
+
+The *size* argument must be set to *sizeof(struct pt_block)*.  The function will
+provide at most *size* bytes of the *pt_block* structure.  A newer decoder
+library may truncate an extended *pt_block* object to *size* bytes.
+
+An older decoder library may provide fewer *pt_block* fields.  Fields that are
+not provided will be zero-initialized.  For fields where zero is a valid value
+(e.g. for bit-fields), check the decoder library version to determine which
+fields are valid.  See **pt_library_version**(3).
+
+On success, the next block of instructions is provided in the *pt_block* object
+pointed to by the *block* argument.  The *pt_block* structure is declared as:
+
+~~~{.c}
+/** A block of instructions.
+ *
+ * Instructions in this block are executed sequentially but are not necessarily
+ * contiguous in memory.  Users are expected to follow direct branches.
+ */
+struct pt_block {
+	/** The IP of the first instruction in this block. */
+	uint64_t ip;
+
+	/** The IP of the last instruction in this block.
+	 *
+	 * This can be used for error-detection.
+	 */
+	uint64_t end_ip;
+
+	/** The image section that contains the instructions in this block.
+	 *
+	 * A value of zero means that the section did not have an identifier.
+	 * The section was not added via an image section cache or the memory
+	 * was read via the read memory callback.
+	 */
+	int isid;
+
+	/** The execution mode for all instructions in this block. */
+	enum pt_exec_mode mode;
+
+	/** The instruction class for the last instruction in this block.
+	 *
+	 * This field may be set to ptic_error to indicate that the instruction
+	 * class is not available.  The block decoder may choose to not provide
+	 * the instruction class in some cases for performance reasons.
+	 */
+	enum pt_insn_class iclass;
+
+	/** The number of instructions in this block. */
+	uint16_t ninsn;
+
+	/** The raw bytes of the last instruction in this block in case the
+	 * instruction does not fit entirely into this block's section.
+	 *
+	 * This field is only valid if \@truncated is set.
+	 */
+	uint8_t raw[pt_max_insn_size];
+
+	/** The size of the last instruction in this block in bytes.
+	 *
+	 * This field is only valid if \@truncated is set.
+	 */
+	uint8_t size;
+
+	/** A collection of flags giving additional information about the
+	 * instructions in this block.
+	 *
+	 * - all instructions in this block were executed speculatively.
+	 */
+	uint32_t speculative:1;
+
+	/** - speculative execution was aborted after this block.
*/ + uint32_t aborted:1; + + /** - speculative execution was committed after this block. */ + uint32_t committed:1; + + /** - tracing was disabled after this block. */ + uint32_t disabled:1; + + /** - tracing was enabled at this block. */ + uint32_t enabled:1; + + /** - tracing was resumed at this block. + * + * In addition to tracing being enabled, it continues from the IP + * at which tracing had been disabled before. + * + * If tracing was disabled at a call instruction, we assume that + * tracing will be re-enabled after returning from the call at the + * instruction following the call instruction. + */ + uint32_t resumed:1; + + /** - normal execution flow was interrupted after this block. */ + uint32_t interrupted:1; + + /** - tracing resumed at this block after an overflow. */ + uint32_t resynced:1; + + /** - tracing was stopped after this block. */ + uint32_t stopped:1; + + /** - the last instruction in this block is truncated. + * + * It starts in this block's section but continues in one or more + * other sections depending on how fragmented the memory image is. + * + * The raw bytes for the last instruction are provided in \@raw and + * its size in \@size in this case. + */ + uint32_t truncated:1; +}; +~~~ + +The fields of the *pt_block* structure are described in more detail below: + +ip +: The virtual address of the first instruction in the block. The address + should be interpreted in the current address space context. + +end_ip +: The virtual address of the last instruction in the block. The address + should be interpreted in the current address space context. + + This can be used for error detection. Reconstruction of the instructions in + a block should end with the last instruction at *end_ip*. + +isid +: The image section identifier of the section from which the block of + instructions originated. This will be zero unless the instructions came + from a section that was added via an image section cache. See + **pt_image_add_cached**(3). + + The image section identifier can be used for reading the memory containing + an instruction in order to decode it and for tracing an instruction back to + its binary file and from there to source code. + +mode +: The execution mode at which the instructions in the block were executed. + The *pt_exec_mode* enumeration is declared as: + +~~~{.c} +/** An execution mode. */ +enum pt_exec_mode { + ptem_unknown, + ptem_16bit, + ptem_32bit, + ptem_64bit +}; +~~~ + +iclass +: A coarse classification of the last instruction in the block. This may be + *ptic_error* to indicate that the classification is not available. + + The block decoder knows the instruction class of the instruction that ended + the block most of the time. If it does, it provides this information to + save the caller the effort of decoding the instruction in some cases. + +ninsn +: The number of instructions contained in this block. + + The instructions are sequential in the sense that no trace is required for + reconstructing them. They are not necessarily contiguous in memory. + + The IP of the first instruction is given in the *ip* field and the IP of + other instructions can be determined by decoding and examining the previous + instruction. + +raw +: If the last instruction of this block can not be read entirely from this + block's section, this field provides the instruction's raw bytes. + + It is only valid if the *truncated* flag is set. 
+ +size +: If the last instruction of this block can not be read entirely from this + block's section, this field provides the instruction's size in bytes. + + It is only valid if the *truncated* flag is set. + +speculative +: A flag giving the speculative execution status of all instructions in the + block. If set, the instructions were executed speculatively. Otherwise, + the instructions were executed normally. + +aborted +: A flag saying whether speculative execution was aborted after the last + instruction in this block. If set, speculative execution was aborted and + the effect of speculatively executed instructions prior to and including + this block was discarded. + +committed +: A flag saying whether the speculative execution state was committed after + the last instruction in this block. If set, the effect of speculatively + executed instructions prior to and including this block was committed. + +disabled +: A flag saying that tracing was disabled after the last instruction in this + block. If set, tracing was disabled after the last instruction in this + block retired. + +enabled +: A flag saying whether tracing was enabled at the first instruction in this + block. If set, this is the first block of instructions after tracing was + enabled. + +resumed +: A flag saying whether tracing was resumed at the first instruction in this + block. If set, tracing was previously disabled at this block's IP before + executing the instruction at that IP and was then enabled at the same IP. + + A typical example would be a system call or interrupt when tracing only user + space. Tracing is disabled due to the context switch and is then resumed + from the next instruction after returning to user space. + +interrupted +: A flag saying whether normal execution flow was interrupted after the last + instruction in this block. If set, the normal execution flow was + interrupted. + + The next instruction, which is provided by another call to + **pt_blk_next**(), is the next instruction that retired after the + interrupt. This is not necessarily the interrupt's destination. + +resynced +: A flag saying whether tracing resumed at the fist instruction in this block + after an overflow. If set, there was an internal buffer overflow and + packets were lost. This was the first block of instructions to retire after + the overflow resolved. + +stopped +: A flag saying whether tracing was stopped after the last instruction in this + block. If set, this is the last block of instructions that retired before + tracing was stopped due to a TraceStop condition. + +truncated +: A flag saying whether the last instruction in this block can not be read + entirely from this block's section. Some bytes need to be read from one or + more other sections. This can happen when an image section is partially + overwritten by another image section. + + If set, the last instruction's memory is provided in *raw* and its size in + *size*. + + +# RETURN VALUE + +**pt_blk_next**() returns zero or a positive value on success or a negative +*pt_error_code* enumeration constant in case of an error. + +On success, a bit-vector of *pt_status_flag* enumeration constants is returned. +The *pt_status_flag* enumeration is declared as: + +~~~{.c} +/** Decoder status flags. */ +enum pt_status_flag { + /** There is an event pending. */ + pts_event_pending = 1 << 0, + + /** The address has been suppressed. */ + pts_ip_suppressed = 1 << 1, + + /** There is no more trace data available. 
*/ + pts_eos = 1 << 2 +}; +~~~ + +The *pt_eos* flag indicates that the information contained in the Intel PT +stream has been consumed. Further calls to **pt_blk_next**() will continue to +provide blocks for instructions as long as the instruction's addresses can be +determined without further trace. + + +# ERRORS + +pte_invalid +: The *decoder* or *block* argument is NULL or the *size* argument is too + small. + +pte_eos +: Decode reached the end of the trace stream. + +pte_nosync +: The decoder has not been synchronized onto the trace stream. Use + **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3), or + **pt_blk_sync_set**(3) to synchronize *decoder*. + +pte_bad_opc +: The decoder encountered an unsupported Intel PT packet opcode. + +pte_bad_packet +: The decoder encountered an unsupported Intel PT packet payload. + +pte_bad_query +: Execution flow reconstruction and trace got out of sync. + + This typically means that, on its way to the virtual address of the next + event, the decoder encountered a conditional or indirect branch for which it + did not find guidance in the trace. + + +# SEE ALSO + +**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), +**pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3), +**pt_blk_sync_set**(3), **pt_blk_time**(3), **pt_blk_core_bus_ratio**(3) diff --git a/doc/man/pt_blk_sync_forward.3.md b/doc/man/pt_blk_sync_forward.3.md new file mode 100644 index 0000000..0fef362 --- /dev/null +++ b/doc/man/pt_blk_sync_forward.3.md @@ -0,0 +1,144 @@ +% PT_BLK_SYNC_FORWARD(3) + + + +# NAME + +pt_blk_sync_forward, pt_blk_sync_backward, pt_blk_sync_set - synchronize an +Intel(R) Processor Trace block decoder + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_blk_sync_forward(struct pt_block_decoder \**decoder*);** +| **int pt_blk_sync_backward(struct pt_block_decoder \**decoder*);** +| **int pt_blk_sync_set(struct pt_block_decoder \**decoder*,** +| **uint64_t *offset*);** + +Link with *-lipt*. + + +# DESCRIPTION + +These functions synchronize an Intel Processor Trace (Intel PT) block decoder +pointed to by *decoder* onto the trace stream in *decoder*'s trace buffer. + +They search for a Packet Stream Boundary (PSB) packet in the trace stream and, +if successful, set *decoder*'s current position and synchronization position to +that packet and start processing packets. For synchronization to be +successfull, there must be a full PSB+ header in the trace stream. + +**pt_blk_sync_forward**() searches in forward direction from *decoder*'s +current position towards the end of the trace buffer. If *decoder* has been +newly allocated and has not been synchronized yet, the search starts from the +beginning of the trace. + +**pt_blk_sync_backward**() searches in backward direction from *decoder*'s +current position towards the beginning of the trace buffer. If *decoder* has +been newly allocated and has not been synchronized yet, the search starts from +the end of the trace. + +**pt_blk_sync_set**() searches at *offset* bytes from the beginning of its +trace buffer. + + +# RETURN VALUE + +All synchronization functions return zero or a positive value on success or a +negative *pt_error_code* enumeration constant in case of an error. + +On success, a bit-vector of *pt_status_flag* enumeration constants is returned. +The *pt_status_flag* enumeration is declared as: + +~~~{.c} +/** Decoder status flags. */ +enum pt_status_flag { + /** There is an event pending. */ + pts_event_pending = 1 << 0, + + /** The address has been suppressed. 
*/ + pts_ip_suppressed = 1 << 1, + + /** There is no more trace data available. */ + pts_eos = 1 << 2 +}; +~~~ + + +# ERRORS + +pte_invalid +: The *decoder* argument is NULL. + +pte_eos +: There is no (further) PSB+ header in the trace stream + (**pt_blk_sync_forward**() and **pt_blk_sync_backward**()) or at *offset* + bytes into the trace buffer (**pt_blk_sync_set**()). + +pte_nosync +: There is no PSB packet at *offset* bytes from the beginning of the trace + (**pt_blk_sync_set**() only). + +pte_bad_opc +: The decoder encountered an unsupported Intel PT packet opcode. + +pte_bad_packet +: The decoder encountered an unsupported Intel PT packet payload. + + +# EXAMPLE + +The following example re-synchronizes an Intel PT block decoder after decode +errors: + +~~~{.c} +int foo(struct pt_block_decoder *decoder) { + for (;;) { + int errcode; + + errcode = pt_blk_sync_forward(decoder); + if (errcode < 0) + return errcode; + + do { + errcode = decode(decoder); + } while (errcode >= 0); + } +} +~~~ + + +# SEE ALSO + +**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), +**pt_blk_get_offset**(3), **pt_blk_get_sync_offset**(3), +**pt_blk_get_config**(3), **pt_blk_time**(3), **pt_blk_core_bus_ratio**(3), +**pt_blk_next**(3) diff --git a/doc/man/pt_config.3.md b/doc/man/pt_config.3.md new file mode 100644 index 0000000..28a8827 --- /dev/null +++ b/doc/man/pt_config.3.md @@ -0,0 +1,318 @@ +% PT_CONFIG(3) + + + +# NAME + +pt_config, pt_config_init, pt_cpu_errata - Intel(R) Processor Trace +encoder/decoder configuration + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_config;** +| +| **void pt_config_init(struct pt_config \**config*);** +| +| **int pt_cpu_errata(struct pt_errata \**errata*, const struct pt_cpu \**cpu*);** + +Link with *-lipt*. + + +# DESCRIPTION + +The *pt_config* structure defines an Intel Processor Trace (Intel PT) encoder or +decoder configuration. It is required for allocating a trace packet encoder +(see **pt_alloc_encoder**(3)), a trace packet decoder (see +**pt_pkt_alloc_decoder**(3)), a query decoder (see **pt_qry_alloc_decoder**(3)), +or an instruction flow decoder (see **pt_insn_alloc_decoder**(3)). + +**pt_config_init**() zero-initializes its *config* argument and sets *config*'s +*size* field to *sizeof(struct pt_config)*. + +**pt_cpu_errata**() enables workarounds for known errata in its *errata* +argument for the processor defined by its family/model/stepping in its *cpu* +argument. + + +The *pt_config* structure is declared as: + +~~~{.c} +/** An Intel PT decoder configuration. */ +struct pt_config { + /** The size of the config structure in bytes. */ + size_t size; + + /** The trace buffer begin address. */ + uint8_t *begin; + + /** The trace buffer end address. */ + uint8_t *end; + + /** An optional callback for handling unknown packets. + * + * If \@callback is not NULL, it is called for any unknown + * opcode. + */ + struct { + /** The callback function. + * + * It shall decode the packet at \@pos into \@unknown. + * It shall return the number of bytes read upon success. + * It shall return a negative pt_error_code otherwise. + * The below context is passed as \@context. + */ + int (*callback)(struct pt_packet_unknown *unknown, + const struct pt_config *config, + const uint8_t *pos, void *context); + + /** The user-defined context for this configuration. */ + void *context; + } decode; + + /** The cpu on which Intel PT has been recorded. */ + struct pt_cpu cpu; + + /** The errata to apply when encoding or decoding Intel PT. 
*/ + struct pt_errata errata; + + /** The CTC frequency. + * + * This is only required if MTC packets have been enabled in + * IA32_RTIT_CTRL.MTCEn. + */ + uint32_t cpuid_0x15_eax, cpuid_0x15_ebx; + + /** The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq. + * + * This is only required if MTC packets have been enabled in + * IA32_RTIT_CTRL.MTCEn. + */ + uint8_t mtc_freq; + + /** The nominal frequency as defined in + * MSR_PLATFORM_INFO[15:8]. + * + * This is only required if CYC packets have been enabled in + * IA32_RTIT_CTRL.CYCEn. + * + * If zero, timing calibration will only be able to use MTC + * and CYC packets. + * + * If not zero, timing calibration will also be able to use + * CBR packets. + */ + uint8_t nom_freq; + + /** A collection of decoder-specific flags. */ + struct pt_conf_flags flags; +}; +~~~ + +The fields of the *pt_config* structure are described in more detail below: + +size +: The size of the *pt_config* structure for backward and forward + compatibility. Set it to *sizeof(struct pt_config)*. + +begin, end +: The begin and end of a user-allocated memory buffer; *begin* points to + the first byte of the buffer, *end* points to one past the last byte in the + buffer. + + The packet encoder will generate Intel PT packets into the memory buffer. + + The decoders expect the buffer to contain raw Intel PT packets. They decode + directly from the buffer and expect the buffer to remain valid until the + decoder has been freed. + +decode +: An optional packet decode callback function. If *decode.callback* is not + NULL, it will be called for any unknown packet with the decoder + configuration, the current decoder position and with a user-defined context + provided in *callback.context* as arguments. + + If the callback function is able to decode the packet, it shall return the + size of the decoded packet and provide details in a *pt_packet_unknown* + object. + + If the packet cannot be decoded, the callback function shall return a + negative *pt_error_code* enumeration constant. + + The *pt_packet_unknown* object can be used to provide user-defined + information back to the user when using the packet decoder to iterate over + Intel PT packets. Other decoders ignore this information but will skip + the packet if a non-zero size is returned by the callback function. + +cpu +: The processor on which the trace has been collected or for which the trace + should be generated. The processor is identified by its family, model, and + stepping. + +~~~{.c} +/** A cpu vendor. */ +enum pt_cpu_vendor { + pcv_unknown, + pcv_intel +}; + +/** A cpu identifier. */ +struct pt_cpu { + /** The cpu vendor. */ + enum pt_cpu_vendor vendor; + + /** The cpu family. */ + uint16_t family; + + /** The cpu model. */ + uint8_t model; + + /** The stepping. */ + uint8_t stepping; +}; +~~~ + +errata +: The errata workarounds to be applied by the trace encoder or decoder that + is created using this configuration. + + The *pt_errata* structure is a collection of one-bit-fields, one for each + supported erratum. Duplicate errata are indicated by comments for the + erratum for which the workaround was first implemented. Set the field of an + erratum to enable the correspondig workaround. + + The *pt_errata* structure is declared as: + +~~~{.c} +/** A collection of Intel PT errata. */ +struct pt_errata { + /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain + * Unexpected Packets. + * + * Same as: SKD024. 
+ * + * Some Intel Processor Trace packets should be issued only + * between TIP.PGE and TIP.PGD packets. Due to this erratum, + * when a TIP.PGE packet is generated it may be preceded by a + * PSB+ that incorrectly includes FUP and MODE.Exec packets. + */ + uint32_t bdm70:1; + + /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet + * May Be Recorded Following a Transactional Abort. + * + * Use of Intel(R) Transactional Synchronization Extensions + * (Intel(R) TSX) may result in a transactional abort. If an + * abort occurs immediately following a branch instruction, + * an incorrect branch target may be logged in an LBR (Last + * Branch Record) or in an Intel(R) Processor Trace (Intel(R) + * PT) packet before the LBR or Intel PT packet produced by + * the abort. + */ + uint32_t bdm64:1; + + [...] +}; +~~~ + +cpuid_0x15_eax, cpuid_0x15_ebx +: The values of *eax* and *ebx* on a *cpuid* call for leaf *0x15*. + + The value *ebx/eax* gives the ratio of the Core Crystal Clock (CTC) to + Timestamp Counter (TSC) frequency. + + This field is ignored by the packet encoder and packet decoder. It is + required for other decoders if Mini Time Counter (MTC) packets are enabled + in the collected trace. + +mtc_freq +: The Mini Time Counter (MTC) frequency as defined in *IA32_RTIT_CTL.MTCFreq*. + + This field is ignored by the packet encoder and packet decoder. It is + required for other decoders if Mini Time Counter (MTC) packets are enabled + in the collected trace. + +nom_freq +: The nominal or max non-turbo frequency. + + This field is ignored by the packet encoder and packet decoder. It is + used by other decoders if Cycle Count (CYC) packets are enabled to improve + timing calibration for cycle-accurate tracing. + + If the field is zero, the time tracking algorithm will use Mini Time + Counter (MTC) and Cycle Count (CYC) packets for calibration. + + If the field is non-zero, the time tracking algorithm will additionally be + able to calibrate at Core:Bus Ratio (CBR) packets. + +flags +: A collection of decoder-specific configuration flags. + + +# RETURN VALUE + +**pt_cpu_errata**() returns zero on success or a negative *pt_error_code* +enumeration constant otherwise. + + +# ERRORS + +**pt_cpu_errata**() may return the following errors: + +pte_invalid +: The *errata* or *cpu* argument is NULL. + + +# EXAMPLE + +~~~{.c} +int foo(uint8_t *trace_buffer, size_t size, struct pt_cpu cpu) { + struct pt_config config; + int errcode; + + pt_config_init(&config); + config.begin = trace_buffer; + config.end = trace_buffer + size; + config.cpu = cpu; + + errcode = pt_cpu_errata(&config.errata, &config.cpu); + if (errcode < 0) + return errcode; + + [...] 
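+	/* The elided code would typically pass &config on to an encoder or
+	 * decoder allocation function listed under SEE ALSO, for example
+	 * pt_insn_alloc_decoder(&config).
+	 */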
+} +~~~ + + +# SEE ALSO + +**pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3), +**pt_qry_alloc_decoder**(3), **pt_insn_alloc_decoder**(3) diff --git a/doc/man/pt_enc_get_config.3.md b/doc/man/pt_enc_get_config.3.md new file mode 100644 index 0000000..f0841b4 --- /dev/null +++ b/doc/man/pt_enc_get_config.3.md @@ -0,0 +1,77 @@ +% PT_ENC_GET_CONFIG(3) + + + +# NAME + +pt_enc_get_config, pt_pkt_get_config, pt_qry_get_config, pt_insn_get_config, +pt_blk_get_config - get an Intel(R) Processor Trace encoder/decoder's +configuration + + +# SYNOPSIS + +| **\#include ``** +| +| **const struct pt_config \*** +| **pt_enc_get_config(const struct pt_encoder \**encoder*);** +| +| **const struct pt_config \*** +| **pt_pkt_get_config(const struct pt_packet_decoder \**decoder*);** +| +| **const struct pt_config \*** +| **pt_qry_get_config(const struct pt_query_decoder \**decoder*);** +| +| **const struct pt_config \*** +| **pt_insn_get_config(const struct pt_insn_decoder \**decoder*);** +| +| **const struct pt_config \*** +| **pt_blk_get_config(const struct pt_block_decoder \**decoder*);** + +Link with *-lipt*. + + +# DESCRIPTION + +These functions return a pointer to their argument's configuration. The +returned configuration object must not be freed. It is valid as long as their +argument is not freed. + + +# RETURN VALUE + +These functions returns a pointer to a *pt_config* object. The returned pointer +is NULL if their argument is NULL. + + +# SEE ALSO + +**pt_config**(3), **pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3), +**pt_qry_alloc_decoder**(3), **pt_insn_alloc_decoder**(3), +**pt_blk_alloc_decoder**(3) diff --git a/doc/man/pt_enc_get_offset.3.md b/doc/man/pt_enc_get_offset.3.md new file mode 100644 index 0000000..4767ca6 --- /dev/null +++ b/doc/man/pt_enc_get_offset.3.md @@ -0,0 +1,77 @@ +% PT_ENC_GET_OFFSET(3) + + + +# NAME + +pt_enc_get_offset, pt_enc_sync_set - get/set an Intel(R) Processor Trace packet +encoder's current trace buffer offset + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_enc_get_offset(struct pt_packet_encoder \**encoder*,** +| **uint64_t \**offset*);** +| **int pt_enc_sync_set(struct pt_packet_encoder \**encoder*,** +| **uint64_t *offset*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_enc_get_offset**() provides *encoder*'s current position as offset in bytes +from the beginning of *encoder*'s trace buffer in the unsigned integer variable +pointed to by *offset*. + +**pt_enc_sync_set**() sets *encoder*'s current position to *offset* bytes from +the beginning of its trace buffer. + + +# RETURN VALUE + +Both functions return zero on success or a negative *pt_error_code* enumeration +constant in case of an error. + + +# ERRORS + +pte_invalid +: The *encoder* or *offset* (for **pt_enc_sync_set**()) argument is NULL. + +pte_eos +: The *offset* argument is too big and the resulting position would be outside + of *encoder*'s trace buffer (**pt_enc_sync_set**() only). 
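+
+
+# EXAMPLE
+
+The following sketch is illustrative only; *bar*() stands in for arbitrary
+packet encoding:
+
+~~~{.c}
+int foo(struct pt_packet_encoder *encoder) {
+	uint64_t offset;
+	int errcode;
+
+	/* Remember the current position... */
+	errcode = pt_enc_get_offset(encoder, &offset);
+	if (errcode < 0)
+		return errcode;
+
+	/* ...encode some packets... */
+	errcode = bar(encoder);
+	if (errcode < 0)
+		return errcode;
+
+	/* ...and go back to overwrite them. */
+	return pt_enc_sync_set(encoder, offset);
+}
+~~~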
+ + +# SEE ALSO + +**pt_enc_alloc_encoder**(3), **pt_enc_free_encoder**(3), **pt_enc_next**(3) diff --git a/doc/man/pt_image_add_file.3.md b/doc/man/pt_image_add_file.3.md new file mode 100644 index 0000000..73939f0 --- /dev/null +++ b/doc/man/pt_image_add_file.3.md @@ -0,0 +1,135 @@ +% PT_IMAGE_ADD_FILE(3) + + + +# NAME + +pt_image_add_file, pt_image_add_cached, pt_image_copy - add file sections to a +traced memory image descriptor + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_image_add_file(struct pt_image \**image*, const char \**filename*,** +| **uint64_t *offset*, uint64_t *size*,** +| **const struct pt_asid \**asid*, uint64_t *vaddr*);** +| **int pt_image_add_cached(struct pt_image \**image*,** +| **struct pt_image_section_cache \**iscache*,** +| **int *isid*, const struct pt_asid \**asid*);** +| **int pt_image_copy(struct pt_image \**image*,** +| **const struct pt_image \**src*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_image_add_file**() adds a new section to a *pt_image* object. The *image* +argument points to the *pt_image* object to which the new section is added. The +*filename* argument gives the absolute or relative path to the file on disk that +contains the section. The *offset* and *size* arguments define the section +within the file. The *size* argument is silently truncated to end the section +with the end of the underlying file. The *vaddr* argument gives the virtual +address at which the section is being loaded. + +**pt_image_add_cached**() adds a new section from an image section cache. See +**pt_iscache_add_file**(3). The *iscache* argument points to the +*pt_image_section_cache* object containing the section. The *isid* argument +gives the image section identifier for the desired section in that cache. + +The *asid* argument gives an optional address space identifier. If it is not +NULL, it points to a *pt_asid* structure, which is declared as: + +~~~{.c} +/** An Intel PT address space identifier. + * + * This identifies a particular address space when adding file + * sections or when reading memory. + */ +struct pt_asid { + /** The size of this object - set to sizeof(struct pt_asid). + */ + size_t size; + + /** The CR3 value. */ + uint64_t cr3; + + /** The VMCS Base address. */ + uint64_t vmcs; +}; +~~~ + +The *asid* argument can be used to prepare a collection of process, guest, and +hypervisor images to an Intel(R) Processor Trace (Intel PT) instruction flow +decoder. The decoder will select the current image based on CR3 and VMCS +information in the Intel PT trace. + +If only the CR3 or only the VMCS field should be considered by the decoder, +supply *pt_asid_no_cr3* and *pt_asid_no_vmcs* to the other field respectively. + +If the *asid* argument is NULL, the file section will be added for all +processes, guests, and hypervisor images. + +If the new section overlaps with an existing section, the existing section is +truncated or split to make room for the new section. + +**pt_image_copy**() adds file sections from the *pt_image* pointed to by the +*src* argument to the *pt_image* pointed to by the *dst* argument. + + +# RETURN VALUE + +**pt_image_add_file**() and **pt_image_add_cached**() return zero on success or +a negative *pt_error_code* enumeration constant in case of an error. + +**pt_image_copy**() returns the number of ignored sections on success or a +negative *pt_error_code* enumeration constant in case of an error. 
+ + +# ERRORS + +pte_invalid +: The *image* or *filename* argument is NULL or the *offset* argument is too + big such that the section would start past the end of the file + (**pt_image_add_file**()). + The *image* or *iscache* argument is NULL (**pt_image_add_cached**()). + The *src* or *dst* argument is NULL (**pt_image_copy**()). + +pte_bad_image +: The *iscache* does not contain *isid* (**pt_image_add_cached**()). + + +# SEE ALSO + +**pt_image_alloc**(3), **pt_image_free**(3), +**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3), +**pt_image_set_callback**(3), **pt_insn_set_image**(3), +**pt_insn_get_image**(3), **pt_iscache_alloc**(3), **pt_iscache_add_file**(3) diff --git a/doc/man/pt_image_alloc.3.md b/doc/man/pt_image_alloc.3.md new file mode 100644 index 0000000..4ac2b72 --- /dev/null +++ b/doc/man/pt_image_alloc.3.md @@ -0,0 +1,99 @@ +% PT_IMAGE_ALLOC(3) + + + +# NAME + +pt_image_alloc, pt_image_free, pt_image_name - allocate/free a traced memory +image descriptor + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_image \*pt_image_alloc(const char \**name*);** +| **const char \*pt_image_name(const struct pt_image \**image*);** +| **void pt_image_free(struct pt_image \**image*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_image_alloc**() allocates a new *pt_image* and returns a pointer to it. A +*pt_image* object defines the memory image that was traced as a collection of +file sections and the virtual addresses at which those sections were loaded. + +The *name* argument points to an optional zero-terminated name string. If the +*name* argument is NULL, it will be ignored and the returned *pt_image* object +will not have a name. Otherwise, the returned *pt_image* object will have a +copy of the string pointed to by the *name* argument as name. + +**pt_image_name**() returns the name of the *pt_image* object the *image* +argument points to. + +**pt_image_free**() frees the *pt_image* object pointed to by *image*. The +*image* argument must be NULL or point to an image that has been allocated by a +call to **pt_image_alloc**(). + + +# RETURN VALUE + +**pt_image_alloc**() returns a pointer to a *pt_image* object on success or NULL +in case of an error. + +**pt_image_name**() returns a pointer to a zero-terminated string of NULL if the +image does not have a name. + + +# EXAMPLE + +~~~{.c} +int foo(const char *name) { + struct pt_image *image; + errcode; + + image = pt_image_alloc(name); + if (!image) + return pte_nomem; + + errcode = bar(image); + + pt_image_free(image); + return errcode; +} +~~~ + + +# SEE ALSO + +**pt_image_add_file**(3), **pt_image_add_cached**(3), **pt_image_copy**(3), +**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3), +**pt_image_set_callback**(3), **pt_insn_set_image**(3), **pt_insn_get_image**(3) diff --git a/doc/man/pt_image_remove_by_filename.3.md b/doc/man/pt_image_remove_by_filename.3.md new file mode 100644 index 0000000..e22a439 --- /dev/null +++ b/doc/man/pt_image_remove_by_filename.3.md @@ -0,0 +1,150 @@ +% PT_IMAGE_REMOVE_BY_FILENAME(3) + + + +# NAME + +pt_image_remove_by_filename, pt_image_remove_by_asid - remove sections from a +traced memory image descriptor + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_image_remove_by_filename(struct pt_image \**image*,** +| **const char \**filename*,** +| **const struct pt_asid \**asid*);** +| **int pt_image_remove_by_asid(struct pt_image \**image*,** +| **const struct pt_asid \**asid*);** + +Link with *-lipt*. 
+ + +# DESCRIPTION + +**pt_image_remove_by_filename**() removes all sections from *image* that were +added by a call to **pt_image_add_file**(3) with an identical *filename* +argument or by a call to **pt_image_copy**(3) from such a section. Sections +that are based on the same underlying file but that were added using a different +*filename* argument are not removed. + +If the *asid* argument is not NULL, it removes only sections that were added +with a matching address-space identifier. See **pt_image_add_file**(3). + +**pt_image_remove_by_asid**(3) removes all sections from *image* that were added +by a call to **pt_image_add_file**(3) with a matching *asid* argument or by a +call to **pt_image_copy**(3) from such a section. See **pt_image_add_file**(3). + +Two *pt_asid* objects match in their "cr3* or *vmcs* field if one of them does +not provide the field (i.e. sets it to *pt_asid_no_cr3* or *pt_asid_no_vmcs* +respectively) or if the provided values are identical. Two *pt_asid* objects +match if they match in all fields. + + +# RETURN VALUE + +Both functions return the number of sections removed on success or a negative +*pt_error_code* enumeration constant in case of an error. + + +# ERRORS + +pte_invalid +: The *image* argument is NULL or the *filename* argument is NULL + (**pt_image_remove_by_filename**() only). + + +# EXAMPLE + +~~~{.c} +int foo(struct pt_image *image, uint64_t cr3) { + struct pt_asid asid1, asid2; + int errcode; + + pt_asid_init(&asid1); + asid1.cr3 = cr3; + + pt_asid_init(&asid2); + asid2.cr3 = ~cr3; + + errcode = pt_image_add_file(image, "/path/to/libfoo.so", + 0xa000, 0x100, &asid1, 0xb000); + if (errcode < 0) + return errcode; + + errcode = pt_image_add_file(image, "rel/path/to/libfoo.so", + 0xa000, 0x100, &asid1, 0xc000); + if (errcode < 0) + return errcode; + + /* This call would only remove the section added first: + * + * - filename matches only the first section's filename + * - NULL matches every asid + */ + (void) pt_image_remove_by_filename(image, + "/path/to/libfoo.so", + NULL); + + /* This call would not remove any of the above sections: + * + * - filename matches the first section's filename + * - asid2 does not match asid1 + */ + (void) pt_image_remove_by_filename(image, + "/path/to/libfoo.so", + &asid2); + + /* This call would not remove any of the above sections: + * + * - asid2 does not match asid1 + */ + (void) pt_image_remove_by_asid(image, &asid2); + + /* This call would remove both sections: + * + * - asid1 matches itself + */ + (void) pt_image_remove_by_asid(image, &asid1); + + /* This call would remove both sections: + * + * - NULL matches every asid + */ + (void) pt_image_remove_by_asid(image, NULL); +} +~~~ + + +# SEE ALSO + +**pt_image_alloc**(3), **pt_image_free**(3), **pt_image_add_file**(3), +**pt_image_add_cached**(3), **pt_image_copy**(3), **pt_insn_set_image**(3), +**pt_insn_get_image**(3) diff --git a/doc/man/pt_image_set_callback.3.md b/doc/man/pt_image_set_callback.3.md new file mode 100644 index 0000000..2ca8d7b --- /dev/null +++ b/doc/man/pt_image_set_callback.3.md @@ -0,0 +1,103 @@ +% PT_IMAGE_SET_CALLBACK(3) + + + +# NAME + +pt_image_set_callback - set a traced memory image read memory callback + + +# SYNOPSIS + +| **\#include ``** +| +| **typedef int (read_memory_callback_t)(uint8_t \**buffer*, size_t *size*,** +| **const struct pt_asid \**asid*,** +| **uint64_t *ip*, void \**context*);** +| +| **int pt_image_set_callback(struct pt_image \**image*,** +| **read_memory_callback_t \**callback*,** +| **void \**context*);** + 
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_image_set_callback**() sets the function pointed to by *callback* as the
+read-memory callback function in the *pt_image* object pointed to by *image*.
+Any previous read-memory callback function is replaced. The read-memory
+callback function can be removed by passing NULL as *callback* argument.
+
+When the Intel(R) Processor Trace (Intel PT) instruction flow decoder that is
+using *image* tries to read memory from a location that is not contained in any
+of the file sections in *image*, it calls the read-memory callback function with
+the following arguments:
+
+buffer
+:   A pre-allocated memory buffer to hold the to-be-read memory. The callback
+    function shall provide the read memory in that buffer.
+
+size
+:   The size of the memory buffer pointed to by the *buffer* argument.
+
+asid
+:   The address-space identifier specifying the process, guest, or hypervisor,
+    in which context the *ip* argument is to be interpreted. See
+    **pt_image_add_file**(3).
+
+ip
+:   The virtual address from which *size* bytes of memory shall be read.
+
+context
+:   The *context* argument passed to **pt_image_set_callback**().
+
+The callback function shall return the number of bytes read on success (no more
+than *size*) or a negative *pt_error_code* enumeration constant in case of an
+error.
+
+
+# RETURN VALUE
+
+**pt_image_set_callback**() returns zero on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+:   The *image* argument is NULL.
+
+
+# SEE ALSO
+
+**pt_image_alloc**(3), **pt_image_free**(3), **pt_image_add_file**(3),
+**pt_image_add_cached**(3), **pt_image_copy**(3),
+**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3),
+**pt_insn_set_image**(3), **pt_insn_get_image**(3)
diff --git a/doc/man/pt_insn_alloc_decoder.3.md b/doc/man/pt_insn_alloc_decoder.3.md
new file mode 100644
index 0000000..007fab4
--- /dev/null
+++ b/doc/man/pt_insn_alloc_decoder.3.md
@@ -0,0 +1,101 @@
+% PT_INSN_ALLOC_DECODER(3)
+
+
+
+# NAME
+
+pt_insn_alloc_decoder, pt_insn_free_decoder - allocate/free an Intel(R)
+Processor Trace instruction flow decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_insn_decoder \***
+| **pt_insn_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_insn_free_decoder(struct pt_insn_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+An instruction flow decoder decodes raw Intel Processor Trace (Intel PT) into a
+sequence of instructions described by the *pt_insn* structure. See
+**pt_insn_next**(3).
+
+**pt_insn_alloc_decoder**() allocates a new instruction flow decoder and returns
+a pointer to it. The *config* argument points to a *pt_config* object. See
+**pt_config**(3). The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned instruction flow decoder needs to be synchronized onto the trace
+stream before it can be used. To synchronize the instruction flow decoder, use
+**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or
+**pt_insn_sync_set**(3).
+
+**pt_insn_free_decoder**() frees the Intel PT instruction flow decoder pointed
+to by *decoder*. The *decoder* argument must be NULL or point to a decoder that
+has been allocated by a call to **pt_insn_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_insn_alloc_decoder**() returns a pointer to a *pt_insn_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_insn_decoder *decoder;
+	int errcode;
+
+	decoder = pt_insn_alloc_decoder(config);
+	if (!decoder)
+		return pte_nomem;
+
+	errcode = bar(decoder);
+
+	pt_insn_free_decoder(decoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3),
+**pt_insn_sync_set**(3), **pt_insn_get_offset**(3), **pt_insn_get_sync_offset**(3),
+**pt_insn_get_image**(3), **pt_insn_set_image**(3), **pt_insn_get_config**(3),
+**pt_insn_time**(3), **pt_insn_core_bus_ratio**(3), **pt_insn_next**(3)
diff --git a/doc/man/pt_insn_get_image.3.md b/doc/man/pt_insn_get_image.3.md
new file mode 100644
index 0000000..9f64b52
--- /dev/null
+++ b/doc/man/pt_insn_get_image.3.md
@@ -0,0 +1,93 @@
+% PT_INSN_GET_IMAGE(3)
+
+
+
+# NAME
+
+pt_insn_get_image, pt_insn_set_image, pt_blk_get_image, pt_blk_set_image -
+get/set an Intel(R) Processor Trace instruction flow or block decoder's traced
+memory image descriptor
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_image \*pt_insn_get_image(struct pt_insn_decoder \**decoder*);**
+| **struct pt_image \*pt_blk_get_image(struct pt_block_decoder \**decoder*);**
+|
+| **int pt_insn_set_image(struct pt_insn_decoder \**decoder*,**
+| **struct pt_image \**image*);**
+| **int pt_blk_set_image(struct pt_block_decoder \**decoder*,**
+| **struct pt_image \**image*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_insn_get_image**() and **pt_blk_get_image**() return the traced memory
+image descriptor that *decoder* uses for reading instruction memory. See
+**pt_image_alloc**(3). Every decoder comes with a default *pt_image* object
+that is initially empty and that will automatically be destroyed when the
+decoder is freed.
+
+**pt_insn_set_image**() and **pt_blk_set_image**() set the traced memory image
+descriptor that *decoder* uses for reading instruction memory. If the *image*
+argument is NULL, sets *decoder*'s image to be its default image. The user is
+responsible for freeing the *pt_image* object that *image* points to when it is
+no longer needed.
+
+
+# RETURN VALUE
+
+**pt_insn_get_image**() and **pt_blk_get_image**() return a pointer to
+*decoder*'s *pt_image* object. The returned pointer is NULL if the *decoder*
+argument is NULL.
+
+**pt_insn_set_image**() and **pt_blk_set_image**() return zero on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* argument is NULL.
+
+
+# NOTES
+
+One *pt_image* object must not be shared between multiple decoders. Use
+**pt_image_copy**(3) to copy a common image.
+
+
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), **pt_insn_next**(3),
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_insn_get_offset.3.md b/doc/man/pt_insn_get_offset.3.md
new file mode 100644
index 0000000..f8b0a5f
--- /dev/null
+++ b/doc/man/pt_insn_get_offset.3.md
@@ -0,0 +1,82 @@
+% PT_INSN_GET_OFFSET(3)
+
+
+
+# NAME
+
+pt_insn_get_offset, pt_insn_get_sync_offset - get an Intel(R) Processor Trace
+instruction flow decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_insn_get_offset(struct pt_insn_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_insn_get_sync_offset(struct pt_insn_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+ + +# DESCRIPTION + +**pt_insn_get_offset**() provides *decoder*'s current position as offset in +bytes from the beginning of *decoder*'s trace buffer in the unsigned integer +variable pointed to by *offset*. + +**pt_insn_get_sync_offset**() provides *decoder*'s last synchronization point as +offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned +integer variable pointed to by *offset*. + + +# RETURN VALUE + +Both functions return zero on success or a negative *pt_error_code* enumeration +constant in case of an error. + + +# ERRORS + +pte_invalid +: The *decoder* or *offset* argument is NULL. + +pte_nosync +: *decoder* has not been synchronized onto the trace stream. Use + **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or + **pt_insn_sync_set**(3) to synchronize *decoder*. + + +# SEE ALSO + +**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), +**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), +**pt_insn_sync_set**(3), **pt_insn_get_config**(3), **pt_insn_time**(3), +**pt_insn_core_bus_ratio**(3), **pt_insn_next**(3) diff --git a/doc/man/pt_insn_next.3.md b/doc/man/pt_insn_next.3.md new file mode 100644 index 0000000..695b8cf --- /dev/null +++ b/doc/man/pt_insn_next.3.md @@ -0,0 +1,340 @@ +% PT_INSN_NEXT(3) + + + +# NAME + +pt_insn_next, pt_insn - iterate over traced instructions + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_insn;** +| +| **int pt_insn_next(struct pt_insn_decoder \**decoder*,** +| **struct pt_insn \**insn*, size_t *size*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_insn_next**() provides the next instruction in execution order, which is +described by the *pt_insn* structure. + +The *size* argument must be set to *sizeof(struct pt_insn)*. The function will +provide at most *size* bytes of the *pt_insn* structure. A newer decoder +library may truncate an extended *pt_insn* object to *size* bytes. + +An older decoder library may provide less *pt_insn* fields. Fields that are not +provided will be zero-initialized. For fields where zero is a valid value +(e.g. for bit-fields), check the decoder library version to determine which +fields are valid. See **pt_library_version**(3). + +On success, the next instruction is provided in the *pt_insn* object pointed to +by the *insn* argument. The *pt_insn* structure is declared as: + +~~~{.c} +/** A single traced instruction. */ +struct pt_insn { + /** The virtual address in its process. */ + uint64_t ip; + + /** A coarse classification. */ + enum pt_insn_class iclass; + + /** The execution mode. */ + enum pt_exec_mode mode; + + /** The raw bytes. */ + uint8_t raw[pt_max_insn_size]; + + /** The size in bytes. */ + uint8_t size; + + /** A collection of flags giving additional information: + * + * - the instruction was executed speculatively. + */ + uint32_t speculative:1; + + /** - speculative execution was aborted after this + * instruction. + */ + uint32_t aborted:1; + + /** - speculative execution was committed after this + * instruction. + */ + uint32_t committed:1; + + /** - tracing was disabled after this instruction. */ + uint32_t disabled:1; + + /** - tracing was enabled at this instruction. */ + uint32_t enabled:1; + + /** - tracing was resumed at this instruction. + * + * In addition to tracing being enabled, it continues + * from the IP at which tracing had been disabled before. + */ + uint32_t resumed:1; + + /** - normal execution flow was interrupted after this + * instruction. 
+ */ + uint32_t interrupted:1; + + /** - tracing resumed at this instruction after an + * overflow. + */ + uint32_t resynced:1; + + /** - tracing was stopped after this instruction. */ + uint32_t stopped:1; + + /** - this instruction is truncated in its image section. + * + * It starts in the image section identified by \@isid and continues + * in one or more other sections. + */ + uint32_t truncated:1; + + /** The image section identifier for the section containing this + * instruction. + * + * A value of zero means that the section did not have an identifier. + * The section was not added via an image section cache or the memory + * was read via the read memory callback. + */ + int isid; +}; +~~~ + +The fields of the *pt_insn* structure are described in more detail below: + +ip +: The virtual address of the instruction. The address should be interpreted + in the current address space context. + +iclass +: A coarse classification of the instruction suitable for constructing a call + back trace. The *pt_insn_class* enumeration is declared as: + +~~~{.c} +/** The instruction class. + * + * We provide only a very coarse classification suitable for + * reconstructing the execution flow. + */ +enum pt_insn_class { + /* The instruction could not be classified. */ + ptic_error, + + /* The instruction is something not listed below. */ + ptic_other, + + /* The instruction is a near (function) call. */ + ptic_call, + + /* The instruction is a near (function) return. */ + ptic_return, + + /* The instruction is a near unconditional jump. */ + ptic_jump, + + /* The instruction is a near conditional jump. */ + ptic_cond_jump, + + /* The instruction is a call-like far transfer. + * E.g. SYSCALL, SYSENTER, or FAR CALL. + */ + ptic_far_call, + + /* The instruction is a return-like far transfer. + * E.g. SYSRET, SYSEXIT, IRET, or FAR RET. + */ + ptic_far_return, + + /* The instruction is a jump-like far transfer. + * E.g. FAR JMP. + */ + ptic_far_jump +}; +~~~ + +mode +: The execution mode at which the instruction was executed. The + *pt_exec_mode* enumeration is declared as: + +~~~{.c} +/** An execution mode. */ +enum pt_exec_mode { + ptem_unknown, + ptem_16bit, + ptem_32bit, + ptem_64bit +}; +~~~ + +raw +: The memory containing the instruction. + +size +: The size of the instruction in bytes. + +speculative +: A flag giving the speculative execution status of the instruction. If set, + the instruction was executed speculatively. Otherwise, the instruction was + executed normally. + +aborted +: A flag saying whether speculative execution was aborted after this + instruction. If set, speculative execution was aborted and the effect of + speculatively executed instructions prior to this was discarded. + +committed +: A flag saying whether the speculative execution state was committed. If + set, the effect of speculatively executed instructions prior to this was + committed. + +disabled +: A flag saying that tracing was disabled after this instruction. If set, + tracing was disabled after this instruction retired. + +enabled +: A flag saying whether tracing was enabled at this instruction. If set, this + is the first instruction that retired after tracing was enabled. + +resumed +: A flag saying whether tracing was resumed at this instruction. If set, + tracing was previously disabled at this instruction's IP before executing + this instruction and was then enabled at this instruction. + + A typical example would be a system call or interrupt when tracing only user + space. 
Tracing is disabled due to the context switch and is then resumed
+    from the next instruction after returning to user space.
+
+interrupted
+:   A flag saying whether normal execution flow was interrupted after this
+    instruction. If set, the normal execution flow was interrupted.
+
+    The next instruction, which is provided by another call to
+    **pt_insn_next**(), is the next instruction that retired after the
+    interrupt. This is not necessarily the interrupt's destination.
+
+resynced
+:   A flag saying whether tracing resumed at this instruction after an
+    overflow. If set, there was an internal buffer overflow and packets were
+    lost. This was the first instruction to retire after the overflow resolved.
+
+stopped
+:   A flag saying whether tracing was stopped after this instruction. If set,
+    this is the last instruction that retired before tracing was stopped due to
+    a TraceStop condition.
+
+truncated
+:   A flag saying whether this instruction spans more than one image section.
+    If clear, this instruction originates from a single section identified by
+    *isid*. If set, the instruction overlaps two or more image sections. In
+    this case, *isid* identifies the section that contains the first byte.
+
+isid
+:   The image section identifier of the section from which the instruction
+    originated. This will be zero unless the instruction came from a section
+    that was added via an image section cache. See **pt_image_add_cached**(3).
+
+    The image section identifier can be used to trace an instruction back to
+    its binary file and from there to source code.
+
+
+# RETURN VALUE
+
+**pt_insn_next**() returns zero or a positive value on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+	/** There is an event pending. */
+	pts_event_pending = 1 << 0,
+
+	/** The address has been suppressed. */
+	pts_ip_suppressed = 1 << 1,
+
+	/** There is no more trace data available. */
+	pts_eos = 1 << 2
+};
+~~~
+
+The *pts_eos* flag indicates that the information contained in the Intel PT
+stream has been consumed. Further calls to **pt_insn_next**() will continue to
+provide instructions as long as the instruction's address can be determined
+without further trace.
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* or *insn* argument is NULL or the *size* argument is too
+    small.
+
+pte_eos
+:   Decode reached the end of the trace stream.
+
+pte_nosync
+:   The decoder has not been synchronized onto the trace stream. Use
+    **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or
+    **pt_insn_sync_set**(3) to synchronize *decoder*.
+
+pte_bad_opc
+:   The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+:   The decoder encountered an unsupported Intel PT packet payload.
+
+pte_bad_query
+:   Execution flow reconstruction and trace got out of sync.
+
+    This typically means that, on its way to the virtual address of the next
+    event, the decoder encountered a conditional or indirect branch for which it
+    did not find guidance in the trace.
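+
+
+# EXAMPLE
+
+The following example sketches a simple instruction iteration loop. It is only
+a sketch: *foo*() stands for user code, the returned status flags and pending
+events are not examined, and the *[...]* part stands for whatever processing
+the caller applies to the provided instruction.
+
+~~~{.c}
+int foo(struct pt_insn_decoder *decoder) {
+	for (;;) {
+		struct pt_insn insn;
+		int status;
+
+		/* Provide the next instruction in execution order. */
+		status = pt_insn_next(decoder, &insn, sizeof(insn));
+		if (status < 0)
+			return status;
+
+		[...]
+	}
+}
+~~~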
+
+
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3),
+**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3),
+**pt_insn_sync_set**(3), **pt_insn_time**(3), **pt_insn_core_bus_ratio**(3)
diff --git a/doc/man/pt_insn_sync_forward.3.md b/doc/man/pt_insn_sync_forward.3.md
new file mode 100644
index 0000000..77d915e
--- /dev/null
+++ b/doc/man/pt_insn_sync_forward.3.md
@@ -0,0 +1,145 @@
+% PT_INSN_SYNC_FORWARD(3)
+
+
+
+# NAME
+
+pt_insn_sync_forward, pt_insn_sync_backward, pt_insn_sync_set - synchronize an
+Intel(R) Processor Trace instruction flow decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_insn_sync_forward(struct pt_insn_decoder \**decoder*);**
+| **int pt_insn_sync_backward(struct pt_insn_decoder \**decoder*);**
+| **int pt_insn_sync_set(struct pt_insn_decoder \**decoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions synchronize an Intel Processor Trace (Intel PT) instruction flow
+decoder pointed to by *decoder* onto the trace stream in *decoder*'s trace
+buffer.
+
+They search for a Packet Stream Boundary (PSB) packet in the trace stream and,
+if successful, set *decoder*'s current position and synchronization position to
+that packet and start processing packets. For synchronization to be
+successful, there must be a full PSB+ header in the trace stream.
+
+**pt_insn_sync_forward**() searches in forward direction from *decoder*'s
+current position towards the end of the trace buffer. If *decoder* has been
+newly allocated and has not been synchronized yet, the search starts from the
+beginning of the trace.
+
+**pt_insn_sync_backward**() searches in backward direction from *decoder*'s
+current position towards the beginning of the trace buffer. If *decoder* has
+been newly allocated and has not been synchronized yet, the search starts from
+the end of the trace.
+
+**pt_insn_sync_set**() searches at *offset* bytes from the beginning of its
+trace buffer.
+
+
+# RETURN VALUE
+
+All synchronization functions return zero or a positive value on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+	/** There is an event pending. */
+	pts_event_pending = 1 << 0,
+
+	/** The address has been suppressed. */
+	pts_ip_suppressed = 1 << 1,
+
+	/** There is no more trace data available. */
+	pts_eos = 1 << 2
+};
+~~~
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* argument is NULL.
+
+pte_eos
+:   There is no (further) PSB+ header in the trace stream
+    (**pt_insn_sync_forward**() and **pt_insn_sync_backward**()) or at *offset*
+    bytes into the trace buffer (**pt_insn_sync_set**()).
+
+pte_nosync
+:   There is no PSB packet at *offset* bytes from the beginning of the trace
+    (**pt_insn_sync_set**() only).
+
+pte_bad_opc
+:   The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+:   The decoder encountered an unsupported Intel PT packet payload.
+ + +# EXAMPLE + +The following example re-synchronizes an Intel PT instruction flow decoder after +decode errors: + +~~~{.c} +int foo(struct pt_insn_decoder *decoder) { + for (;;) { + int errcode; + + errcode = pt_insn_sync_forward(decoder); + if (errcode < 0) + return errcode; + + do { + errcode = decode(decoder); + } while (errcode >= 0); + } +} +~~~ + + +# SEE ALSO + +**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), +**pt_insn_get_offset**(3), **pt_insn_get_sync_offset**(3), +**pt_insn_get_config**(3), **pt_insn_time**(3), **pt_insn_core_bus_ratio**(3), +**pt_insn_next**(3) diff --git a/doc/man/pt_iscache_add_file.3.md b/doc/man/pt_iscache_add_file.3.md new file mode 100644 index 0000000..d6866fe --- /dev/null +++ b/doc/man/pt_iscache_add_file.3.md @@ -0,0 +1,98 @@ +% PT_ISCACHE_ADD_FILE(3) + + + +# NAME + +pt_iscache_add_file - add file sections to a traced memory image section cache + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_iscache_add_file(struct pt_image_section_cache \**iscache*,** +| **const char \**filename*, uint64_t *offset*,** +| **uint64_t *size*, uint64_t *vaddr*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_iscache_add_file**() adds a new section consisting of *size* bytes starting +at *offset* in *filename* loaded at *vaddr* to *iscache*. + +On success, **pt_iscache_add_file**() returns a positive integer identifier that +uniquely identifies the added section in that cache. This identifier can be +used to add sections from an image section cache to one or more traced memory +images. See **pt_image_add_cached**(3). Sections added from an image section +cache will be shared across images. It can also be used to read memory from the +cached section. See **pt_iscache_read**(3). + +If the cache already contains a suitable section, no section is added and the +identifier for the existing section is returned. If the cache already contains +a section that only differs in the load address, a new section is added that +shares the underlying file section. + + +# RETURN VALUE + +**pt_iscache_add_file**() returns a positive image section identifier on success +or a negative *pt_error_code* enumeration constant in case of an error. + + +# ERRORS + +pte_invalid +: The *iscache* or *filename* argument is NULL or the *offset* argument is too + big such that the section would start past the end of the file. + + +# EXAMPLE + +~~~{.c} +int add_file(struct pt_image_section_cache *iscache, struct pt_image *image, + const char *filename, uint64_t offset, uint64_t size, + uint64_t vaddr, const struct pt_asid *asid) { + int isid; + + isid = pt_iscache_add_file(iscache, filename, offset, size, vaddr); + if (isid < 0) + return isid; + + return pt_image_add_cached(image, iscache, isid, asid); +} +~~~ + + +# SEE ALSO + +**pt_iscache_alloc**(3), **pt_iscache_free**(3), **pt_iscache_read**(3), +**pt_image_add_cached**(3) diff --git a/doc/man/pt_iscache_alloc.3.md b/doc/man/pt_iscache_alloc.3.md new file mode 100644 index 0000000..484640d --- /dev/null +++ b/doc/man/pt_iscache_alloc.3.md @@ -0,0 +1,102 @@ +% PT_ISCACHE_ALLOC(3) + + + +# NAME + +pt_iscache_alloc, pt_iscache_free, pt_iscache_name - allocate/free a traced memory +image section cache + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_image_section_cache \*pt_iscache_alloc(const char \**name*);** +| **const char \*pt_iscache_name(const struct pt_image_section_cache \**iscache*);** +| **void pt_iscache_free(struct pt_image_section_cache \**iscache*);** + +Link with *-lipt*. 
+
+
+# DESCRIPTION
+
+**pt_iscache_alloc**() allocates a new *pt_image_section_cache* and returns a
+pointer to it. A *pt_image_section_cache* object contains a collection of file
+sections and the virtual addresses at which those sections were loaded.
+
+The image sections can be added to one or more *pt_image* objects. The
+underlying file sections will be mapped once and their content will be shared
+across images.
+
+The *name* argument points to an optional zero-terminated name string. If the
+*name* argument is NULL, it will be ignored and the returned
+*pt_image_section_cache* object will not have a name. Otherwise, the returned
+*pt_image_section_cache* object will have a copy of the string pointed to by
+the *name* argument as name.
+
+**pt_iscache_name**() returns the name of the *pt_image_section_cache* object
+the *iscache* argument points to.
+
+**pt_iscache_free**() frees the *pt_image_section_cache* object pointed to by
+*iscache*. The *iscache* argument must be NULL or point to an image section
+cache that has been allocated by a call to **pt_iscache_alloc**().
+
+
+# RETURN VALUE
+
+**pt_iscache_alloc**() returns a pointer to a *pt_image_section_cache* object
+on success or NULL in case of an error.
+
+**pt_iscache_name**() returns a pointer to a zero-terminated string or NULL if
+the image section cache does not have a name.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const char *name) {
+	struct pt_image_section_cache *iscache;
+	int errcode;
+
+	iscache = pt_iscache_alloc(name);
+	if (!iscache)
+		return pte_nomem;
+
+	errcode = bar(iscache);
+
+	pt_iscache_free(iscache);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_iscache_add_file**(3), **pt_image_add_cached**(3)
diff --git a/doc/man/pt_iscache_read.3.md b/doc/man/pt_iscache_read.3.md
new file mode 100644
index 0000000..fa7ea22
--- /dev/null
+++ b/doc/man/pt_iscache_read.3.md
@@ -0,0 +1,89 @@
+% PT_ISCACHE_READ(3)
+
+
+
+# NAME
+
+pt_iscache_read - read memory from a cached file section
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_iscache_read(struct pt_image_section_cache \**iscache*,**
+| **uint8_t \**buffer*, uint64_t *size*, int *isid*,**
+| **uint64_t *vaddr*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_iscache_read**() reads memory from a cached file section. The file section
+must have previously been added by a call to **pt_iscache_add**(3). The
+*iscache* argument points to the *pt_image_section_cache* object. It must be
+the same that was used in the corresponding **pt_iscache_add**(3) call. The
+*buffer* argument must point to a memory buffer of at least *size* bytes. The
+*isid* argument identifies the file section from which memory is read. It must
+be the same identifier that was returned from the corresponding
+**pt_iscache_add**(3) call that added the file section to the cache. The *vaddr*
+argument gives the virtual address from which *size* bytes of memory shall be
+read.
+
+On success, **pt_iscache_read**() copies at most *size* bytes of memory from the
+cached file section identified by *isid* in *iscache* starting at virtual
+address *vaddr* into *buffer* and returns the number of bytes that were copied.
+
+Multiple calls to **pt_iscache_read**() may be necessary if *size* is bigger
+than 4Kbyte or if the read crosses a section boundary.
+
+
+# RETURN VALUE
+
+**pt_iscache_read**() returns the number of bytes that were read on success
+or a negative *pt_error_code* enumeration constant in case of an error.
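+
+
+# EXAMPLE
+
+The following example sketches how memory might be read back from a cached
+section. It is only a sketch: *foo*() and its arguments stand for user code,
+the buffer size is arbitrary, and error handling is reduced to returning the
+error code.
+
+~~~{.c}
+int foo(struct pt_image_section_cache *iscache, const char *filename,
+	uint64_t offset, uint64_t size, uint64_t vaddr) {
+	uint8_t buffer[16];
+	int isid, status;
+
+	/* Add the file section to the cache and remember its identifier. */
+	isid = pt_iscache_add_file(iscache, filename, offset, size, vaddr);
+	if (isid < 0)
+		return isid;
+
+	/* Read the first bytes of the cached section at vaddr. */
+	status = pt_iscache_read(iscache, buffer, sizeof(buffer), isid, vaddr);
+	if (status < 0)
+		return status;
+
+	/* The first status bytes of buffer now hold the section's memory. */
+
+	return 0;
+}
+~~~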
+
+
+# ERRORS
+
+pte_invalid
+:   The *iscache* or *buffer* argument is NULL or the *size* argument is zero.
+
+pte_bad_image
+:   The *iscache* does not contain a section identified by *isid*.
+
+pte_nomap
+:   The *vaddr* argument lies outside of the virtual address range of the cached
+    section.
+
+
+# SEE ALSO
+
+**pt_iscache_alloc**(3), **pt_iscache_free**(3), **pt_iscache_add**(3)
diff --git a/doc/man/pt_library_version.3.md b/doc/man/pt_library_version.3.md
new file mode 100644
index 0000000..ebc5de3
--- /dev/null
+++ b/doc/man/pt_library_version.3.md
@@ -0,0 +1,72 @@
+% PT_LIBRARY_VERSION(3)
+
+
+
+# NAME
+
+pt_library_version - version information
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_version pt_library_version();**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_library_version**() returns the decoder library version.
+
+
+# RETURN VALUE
+
+**pt_library_version**() returns a *pt_version* structure which is declared as:
+
+~~~{.c}
+/** The library version. */
+struct pt_version {
+	/** Major version number. */
+	uint8_t major;
+
+	/** Minor version number. */
+	uint8_t minor;
+
+	/** Reserved bits. */
+	uint16_t reserved;
+
+	/** Build number. */
+	uint32_t build;
+
+	/** Version extension. */
+	const char *ext;
+};
+~~~
diff --git a/doc/man/pt_packet.3.md b/doc/man/pt_packet.3.md
new file mode 100644
index 0000000..7b1785c
--- /dev/null
+++ b/doc/man/pt_packet.3.md
@@ -0,0 +1,197 @@
+% PT_PACKET(3)
+
+
+
+# NAME
+
+pt_packet, pt_enc_next, pt_pkt_next - encode/decode an Intel(R) Processor Trace
+packet
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_packet;**
+|
+| **int pt_enc_next(struct pt_packet_encoder \**encoder*,**
+| **const struct pt_packet \**packet*);**
+|
+| **int pt_pkt_next(struct pt_packet_decoder \**decoder*,**
+| **struct pt_packet \**packet*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_enc_next**() encodes its *packet* argument as Intel Processor Trace (Intel
+PT) packet at *encoder*'s current position. On success, sets *encoder*'s
+current position to point to the first byte after the encoded packet.
+
+**pt_pkt_next**() decodes the Intel PT packet at *decoder*'s current position
+into *packet*. On success, sets *decoder*'s current position to point to the
+first byte after the decoded packet.
+
+The caller is responsible for allocating and freeing the *pt_packet* object
+pointed to by the *packet* argument.
+
+The *size* argument of **pt_pkt_next**() must be set to *sizeof(struct
+pt_packet)*. The function will provide at most *size* bytes of packet data. A
+newer decoder library may provide packet types that are not yet defined. Those
+packets may be truncated. Unknown packet types should be ignored.
+
+If the packet decoder does not know the packet opcode at *decoder*'s current
+position and if *decoder*'s configuration contains a packet decode callback
+function, **pt_pkt_next**() will call that callback function to decode the
+unknown packet. On success, a *ppt_unknown* packet type is provided with the
+information provided by the decode callback function.
+
+An Intel PT packet is described by the *pt_packet* structure, which is declared
+as:
+
+~~~{.c}
+/** An Intel PT packet. */
+struct pt_packet {
+	/** The type of the packet.
+	 *
+	 * This also determines the \@variant field.
+	 */
+	enum pt_packet_type type;
+
+	/** The size of the packet including opcode and payload. */
+	uint8_t size;
+
+	/** Packet specific data. */
+	union {
+		/** Packets: pad, ovf, psb, psbend, stop - no payload.
*/ + + /** Packet: tnt-8, tnt-64. */ + struct pt_packet_tnt tnt; + + /** Packet: tip, fup, tip.pge, tip.pgd. */ + struct pt_packet_ip ip; + + /** Packet: mode. */ + struct pt_packet_mode mode; + + /** Packet: pip. */ + struct pt_packet_pip pip; + + /** Packet: tsc. */ + struct pt_packet_tsc tsc; + + /** Packet: cbr. */ + struct pt_packet_cbr cbr; + + /** Packet: tma. */ + struct pt_packet_tma tma; + + /** Packet: mtc. */ + struct pt_packet_mtc mtc; + + /** Packet: cyc. */ + struct pt_packet_cyc cyc; + + /** Packet: vmcs. */ + struct pt_packet_vmcs vmcs; + + /** Packet: mnt. */ + struct pt_packet_mnt mnt; + + /** Packet: unknown. */ + struct pt_packet_unknown unknown; + } payload; +}; +~~~ + +See the *intel-pt.h* header file for more detail. + + +# RETURN VALUE + +**pt_enc_next**() returns the number of bytes written on success or a negative +*pt_error_code* enumeration constant in case of an error. + +**pt_pkt_next**() returns the number of bytes consumed on success or a negative +*pt_error_code* enumeration constant in case of an error. + + +# ERRORS + +pte_invalid +: The *encoder*/*decoder* or *packet* argument is NULL or the *size* argument + is zero (**pt_pkt_next**() only). + +pte_nosync +: *decoder* has not been synchronized onto the trace stream (**pt_pkt_next**() + only). Use **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or + **pt_pkt_sync_set**(3) to synchronize *decoder*. + +pte_eos +: Encode/decode has reached the end of the trace buffer. There is not enough + space in the trace buffer to generate *packet* (**pt_enc_next**()) or the + trace buffer does not contain a full Intel PT packet (**pt_pkt_next**()). + +pte_bad_opc +: The type of the *packet* argument is not supported (**pt_enc_next**()) or + the packet at *decoder*'s current position is not supported + (**pt_pkt_next**()). + +pte_bad_packet +: The payload or parts of the payload of the *packet* argument is not + supported (**pt_enc_next**()) or the packet at *decoder*'s current position + contains unsupported payload (**pt_pkt_next**()). + + +# EXAMPLE + +The example shows a typical Intel PT packet decode loop. + +~~~{.c} +int foo(struct pt_packet_decoder *decoder) { + for (;;) { + struct pt_packet packet; + int errcode; + + errcode = pt_pkt_next(decoder, &packet, sizeof(packet)); + if (errcode < 0) + return errcode; + + [...] + } +} +~~~ + + +# SEE ALSO + +**pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3), +**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), **pt_pkt_sync_set**(3) diff --git a/doc/man/pt_pkt_alloc_decoder.3.md b/doc/man/pt_pkt_alloc_decoder.3.md new file mode 100644 index 0000000..60e5829 --- /dev/null +++ b/doc/man/pt_pkt_alloc_decoder.3.md @@ -0,0 +1,98 @@ +% PT_PKT_ALLOC_DECODER(3) + + + +# NAME + +pt_pkt_alloc_decoder, pt_pkt_free_decoder - allocate/free an Intel(R) Processor +Trace packet decoder + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_packet_decoder \*** +| **pt_pkt_alloc_decoder(const struct pt_config \**config*);** +| +| **void pt_pkt_free_decoder(struct pt_packet_decoder \**decoder*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_pkt_alloc_decoder**() allocates a new Intel Processor Trace (Intel PT) +packet decoder and returns a pointer to it. The packet decoder decodes raw +Intel PT trace into a stream of *pt_packet* objects. See **pt_pkt_next**(3). + +The *config* argument points to a *pt_config* object. See **pt_config**(3). 
+The *config* argument will not be referenced by the returned decoder but the
+trace buffer defined by the *config* argument's *begin* and *end* fields will.
+
+The returned packet decoder needs to be synchronized onto the trace stream
+before it can be used. To synchronize the packet decoder, use
+**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or
+**pt_pkt_sync_set**(3).
+
+**pt_pkt_free_decoder**() frees the Intel PT packet decoder pointed to by
+*decoder*. The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_pkt_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_pkt_alloc_decoder**() returns a pointer to a *pt_packet_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_packet_decoder *decoder;
+	int errcode;
+
+	decoder = pt_pkt_alloc_decoder(config);
+	if (!decoder)
+		return pte_nomem;
+
+	errcode = bar(decoder);
+
+	pt_pkt_free_decoder(decoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3),
+**pt_pkt_sync_set**(3), **pt_pkt_get_offset**(3), **pt_pkt_get_sync_offset**(3),
+**pt_pkt_get_config**(3), **pt_pkt_next**(3)
diff --git a/doc/man/pt_pkt_get_offset.3.md b/doc/man/pt_pkt_get_offset.3.md
new file mode 100644
index 0000000..5d3c3f3
--- /dev/null
+++ b/doc/man/pt_pkt_get_offset.3.md
@@ -0,0 +1,81 @@
+% PT_PKT_GET_OFFSET(3)
+
+
+
+# NAME
+
+pt_pkt_get_offset, pt_pkt_get_sync_offset - get an Intel(R) Processor Trace
+packet decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_pkt_get_offset(struct pt_packet_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_pkt_get_sync_offset(struct pt_packet_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_pkt_get_offset**() provides *decoder*'s current position as offset in bytes
+from the beginning of *decoder*'s trace buffer in the unsigned integer variable
+pointed to by *offset*.
+
+**pt_pkt_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+:   *decoder* has not been synchronized onto the trace stream. Use
+    **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or
+    **pt_pkt_sync_set**(3) to synchronize *decoder*.
+
+
+# SEE ALSO
+
+**pt_pkt_alloc_decoder**(3), **pt_pkt_free_decoder**(3),
+**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3),
+**pt_pkt_sync_set**(3), **pt_pkt_next**(3)
diff --git a/doc/man/pt_pkt_sync_forward.3.md b/doc/man/pt_pkt_sync_forward.3.md
new file mode 100644
index 0000000..4e13de5
--- /dev/null
+++ b/doc/man/pt_pkt_sync_forward.3.md
@@ -0,0 +1,115 @@
+% PT_PKT_SYNC_FORWARD(3)
+
+
+
+# NAME
+
+pt_pkt_sync_forward, pt_pkt_sync_backward, pt_pkt_sync_set - synchronize an
+Intel(R) Processor Trace packet decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_pkt_sync_forward(struct pt_packet_decoder \**decoder*);**
+| **int pt_pkt_sync_backward(struct pt_packet_decoder \**decoder*);**
+| **int pt_pkt_sync_set(struct pt_packet_decoder \**decoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+ + +# DESCRIPTION + +**pt_pkt_sync_forward**() and **pt_pkt_sync_backward**() synchronize an Intel +Processor Trace (Intel PT) packet decoder pointed to by *decoder* onto the trace +stream in *decoder*'s trace buffer. They search for a Packet Stream Boundary +(PSB) packet in the trace stream and, if successful, set *decoder*'s current +position to that packet. + +**pt_pkt_sync_forward**() searches in forward direction from *decoder*'s current +position towards the end of the trace buffer. If *decoder* has been newly +allocated and has not been synchronized yet, the search starts from the +beginning of the trace. + +**pt_pkt_sync_backward**() searches in backward direction from *decoder*'s +current position towards the beginning of the trace buffer. If *decoder* has +been newly allocated and has not been synchronized yet, the search starts from +the end of the trace. + +**pt_pkt_sync_set**() sets *decoder*'s current position to *offset* bytes from +the beginning of its trace buffer. + + +# RETURN VALUE + +All synchronization functions return zero or a positive value on success or a +negative *pt_error_code* enumeration constant in case of an error. + + +# ERRORS + +pte_invalid +: The *decoder* argument is NULL. + +pte_eos +: There is no (further) PSB packet in the trace stream + (**pt_pkt_sync_forward**() and **pt_pkt_sync_backward**()) or the *offset* + argument is too big and the resulting position would be outside of + *decoder*'s trace buffer (**pt_pkt_sync_set**()). + + +# EXAMPLE + +The following example re-synchronizes an Intel PT packet decoder after decode +errors: + +~~~{.c} +int foo(struct pt_packet_decoder *decoder) { + for (;;) { + int errcode; + + errcode = pt_pkt_sync_forward(decoder); + if (errcode < 0) + return errcode; + + do { + errcode = decode(decoder); + } while (errcode >= 0); + } +} +~~~ + + +# SEE ALSO + +**pt_pkt_alloc_decoder**(3), **pt_pkt_free_decoder**(3), +**pt_pkt_get_offset**(3), **pt_pkt_get_sync_offset**(3), +**pt_pkt_get_config**(3), **pt_pkt_next**(3) diff --git a/doc/man/pt_qry_alloc_decoder.3.md b/doc/man/pt_qry_alloc_decoder.3.md new file mode 100644 index 0000000..c276bf5 --- /dev/null +++ b/doc/man/pt_qry_alloc_decoder.3.md @@ -0,0 +1,113 @@ +% PT_QRY_ALLOC_DECODER(3) + + + +# NAME + +pt_qry_alloc_decoder, pt_qry_free_decoder - allocate/free an Intel(R) Processor +Trace query decoder + + +# SYNOPSIS + +| **\#include ``** +| +| **struct pt_query_decoder \*** +| **pt_qry_alloc_decoder(const struct pt_config \**config*);** +| +| **void pt_qry_free_decoder(struct pt_query_decoder \**decoder*);** + +Link with *-lipt*. + + +# DESCRIPTION + +A query decoder decodes raw Intel Processor Trace (Intel PT) and provides +functions for querying the decoder about: + + - whether the next conditional branch was taken (or not taken) + - the destination of the next indirect branch + +This information can be used to reconstruct the execution flow of the traced +code. As long as the flow is clear, follow the flow. If the flow cannot be +determined by examining the current instruction, ask the query decoder. + +In addition, the query decoder indicates asynchronous events via the return +value of its query functions and provides an additional function to query for +such asynchronous events. See **pt_qry_cond_branch**(3), +**pt_qry_indirect_branch**(3), and **pt_qry_event**(3). + +**pt_qry_alloc_decoder**() allocates a new query decoder and returns a pointer +to it. The *config* argument points to a *pt_config* object. See +**pt_config**(3). 
The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned query decoder needs to be synchronized onto the trace stream
+before it can be used. To synchronize the query decoder, use
+**pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+**pt_qry_sync_set**(3).
+
+**pt_qry_free_decoder**() frees the Intel PT query decoder pointed to by
+*decoder*. The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_qry_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_qry_alloc_decoder**() returns a pointer to a *pt_query_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_query_decoder *decoder;
+	int errcode;
+
+	decoder = pt_qry_alloc_decoder(config);
+	if (!decoder)
+		return pte_nomem;
+
+	errcode = bar(decoder);
+
+	pt_qry_free_decoder(decoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3),
+**pt_qry_sync_set**(3), **pt_qry_get_offset**(3), **pt_qry_get_sync_offset**(3),
+**pt_qry_get_config**(3), **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_cond_branch.3.md b/doc/man/pt_qry_cond_branch.3.md
new file mode 100644
index 0000000..4a5775c
--- /dev/null
+++ b/doc/man/pt_qry_cond_branch.3.md
@@ -0,0 +1,152 @@
+% PT_QRY_COND_BRANCH(3)
+
+
+
+# NAME
+
+pt_qry_cond_branch, pt_qry_indirect_branch - query an Intel(R) Processor Trace
+query decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_cond_branch(struct pt_query_decoder \**decoder*,**
+| **int \**taken*);**
+| **int pt_qry_indirect_branch(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_cond_branch**() uses Intel Processor Trace (Intel PT) to determine
+whether the next conditional branch in the traced code was taken or was not
+taken. The *decoder* argument must point to an Intel PT query decoder.
+
+On success, sets the variable pointed to by the *taken* argument to a non-zero
+value if the next conditional branch is taken and to zero if it is not taken.
+
+**pt_qry_indirect_branch**() uses Intel Processor Trace (Intel PT) to determine
+the destination virtual address of the next indirect branch in the traced code.
+
+On success, provides the destination address in the integer variable pointed to
+by the *ip* argument. If the destination address has been suppressed in the
+Intel PT trace, the lack of an IP is indicated in the return value by setting
+the *pts_ip_suppressed* bit.
+
+
+# RETURN VALUE
+
+Both functions return zero or a positive value on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+	/** There is an event pending. */
+	pts_event_pending = 1 << 0,
+
+	/** The address has been suppressed. */
+	pts_ip_suppressed = 1 << 1,
+
+	/** There is no more trace data available. */
+	pts_eos = 1 << 2
+};
+~~~
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* argument or the *taken* (**pt_qry_cond_branch**()) or *ip*
+    (**pt_qry_indirect_branch**()) argument is NULL.
+
+pte_eos
+:   Decode reached the end of the trace stream.
+ +pte_nosync +: The decoder has not been synchronized onto the trace stream. Use + **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or + **pt_qry_sync_set**(3) to synchronize *decoder*. + +pte_bad_opc +: The decoder encountered an unsupported Intel PT packet opcode. + +pte_bad_packet +: The decoder encountered an unsupported Intel PT packet payload. + +pte_bad_query +: The query does not match the data provided in the Intel PT stream. Based on + the trace, the decoder expected a call to the other query function or a call + to **pt_qry_event**(3). This usually means that execution flow + reconstruction and trace got out of sync. + + +# EXAMPLE + +The following example sketches an execution flow reconstruction loop. +Asynchronous events have been omitted. + +~~~{.c} +int foo(struct pt_query_decoder *decoder, uint64_t ip) { + for (;;) { + if (insn_is_cond_branch(ip)) { + int errcode, taken; + + errcode = pt_qry_cond_branch(decoder, &taken); + if (errcode < 0) + return errcode; + + if (taken) + ip = insn_destination(ip); + else + ip = insn_next_ip(ip); + } else if (insn_is_indirect_branch(ip)) { + int errcode; + + errcode = pt_qry_indirect_branch(decoder, &ip); + if (errcode < 0) + return errcode; + } else + ip = insn_next_ip(ip); + } +} +~~~ + + +# SEE ALSO + +**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3), +**pt_qry_event**(3), **pt_qry_time**(3), **pt_qry_core_bus_ratio**(3) diff --git a/doc/man/pt_qry_event.3.md b/doc/man/pt_qry_event.3.md new file mode 100644 index 0000000..a23ec0d --- /dev/null +++ b/doc/man/pt_qry_event.3.md @@ -0,0 +1,254 @@ +% PT_QRY_EVENT(3) + + + +# NAME + +pt_qry_event - query an Intel(R) Processor Trace query decoder for an asynchronous event + + +# SYNOPSIS + +| **\#include ``** +| +| **int pt_qry_event(struct pt_query_decoder \**decoder*,** +| **struct pt_event \**event*, size_t *size*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_qry_event**() provides the next pending asynchronous event in *decoder*'s +Intel Processor Trace (Intel PT) decode in the *pt_event* object pointed to by +the *event* argument. + +The *size* argument must be set to *sizeof(struct pt_event)*. The function will +provide at most *size* bytes of the *pt_event* structure. A newer decoder +library may provide event types that are not yet defined. Those events may be +truncated. + +On success, detailed information about the event is provided in the *pt_event* +object pointed to by the *event* argument. The *pt_event* structure is declared +as: + +~~~{.c} +/** An event. */ +struct pt_event { + /** The type of the event. */ + enum pt_event_type type; + + /** A flag indicating that the event IP has been + * suppressed. + */ + uint32_t ip_suppressed:1; + + /** A flag indicating that the event is for status update. */ + uint32_t status_update:1; + + /** A flag indicating that the event has timing + * information. + */ + uint32_t has_tsc:1; + + /** The time stamp count of the event. + * + * This field is only valid if \@has_tsc is set. + */ + uint64_t tsc; + + /** The number of lost mtc and cyc packets. + * + * This gives an idea about the quality of the \@tsc. The + * more packets were dropped, the less precise timing is. + */ + uint32_t lost_mtc; + uint32_t lost_cyc; + + /* Reserved space for future extensions. */ + uint64_t reserved[2]; + + /** Event specific data. */ + union { + /** Event: enabled. */ + struct { + /** The address at which tracing resumes. */ + uint64_t ip; + } enabled; + + /** Event: disabled. 
*/ + struct { + /** The destination of the first branch inside a + * filtered area. + * + * This field is not valid if \@ip_suppressed is set. + */ + uint64_t ip; + + /* The exact source ip needs to be determined using + * disassembly and the filter configuration. + */ + } disabled; + + [...] + } variant; +}; +~~~ + +See the *intel-pt.h* header file for more detail. The common fields of the +*pt_event* structure are described in more detail below: + +type +: The type of the event as a *pt_event_type* enumeration, which is declared + as: + +~~~{.c} +/** Event types. */ +enum pt_event_type { + /* Tracing has been enabled/disabled. */ + ptev_enabled, + ptev_disabled, + + /* Tracing has been disabled asynchronously. */ + ptev_async_disabled, + + /* An asynchronous branch, e.g. interrupt. */ + ptev_async_branch, + + /* A synchronous paging event. */ + ptev_paging, + + /* An asynchronous paging event. */ + ptev_async_paging, + + /* Trace overflow. */ + ptev_overflow, + + /* An execution mode change. */ + ptev_exec_mode, + + /* A transactional execution state change. */ + ptev_tsx, + + /* Trace Stop. */ + ptev_stop, + + /* A synchronous vmcs event. */ + ptev_vmcs, + + /* An asynchronous vmcs event. */ + ptev_async_vmcs +}; +~~~ + +ip_suppressed +: A flag indicating whether the *ip* field in the event-dependent part is not + valid because the value has been suppressed in the trace. + +status_update +: A flag indicating whether the event is for updating the decoder's status. + Status update events originate from Intel PT packets in PSB+. + +has_tsc +: A flag indicating that the event's timing-related fields *tsc*, *lost_mtc*, + and *lost_cyc* are valid. + +tsc +: The last time stamp count before the event. Depending on the timing + configuration, the timestamp can be more or less precise. For + cycle-accurate tracing, event packets are typically CYC-eligible so the + timestamp should be cycle-accurate. + +lost_mtc, lost_cyc +: The number of lost MTC and CYC updates. An update is lost if the decoder + was not able to process an MTC or CYC packet due to missing information. + This can be either missing calibration or missing configuration information. + The number of lost MTC and CYC updates gives a rough idea about the quality + of the *tsc* field. + +variant +: This field contains event-specific information. See the *intel-pt.h* header + file for details. + + +# RETURN VALUE + +**pt_qry_event**() returns zero or a positive value on success or a negative +*pt_error_code* enumeration constant in case of an error. + +On success, a bit-vector of *pt_status_flag* enumeration constants is returned. +The *pt_status_flag* enumeration is declared as: + +~~~{.c} +/** Decoder status flags. */ +enum pt_status_flag { + /** There is an event pending. */ + pts_event_pending = 1 << 0, + + /** The address has been suppressed. */ + pts_ip_suppressed = 1 << 1, + + /** There is no more trace data available. */ + pts_eos = 1 << 2 +}; +~~~ + + +# ERRORS + +pte_invalid +: The *decoder* or *event* argument is NULL or the *size* argument is too + small. + +pte_eos +: Decode reached the end of the trace stream. + +pte_nosync +: The decoder has not been synchronized onto the trace stream. Use + **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or + **pt_qry_sync_set**(3) to synchronize *decoder*. + +pte_bad_opc +: The decoder encountered an unsupported Intel PT packet opcode. + +pte_bad_packet +: The decoder encountered an unsupported Intel PT packet payload. 
+
+pte_bad_query
+:   The query does not match the data provided in the Intel PT stream. Based on
+    the trace, the decoder expected a call to **pt_qry_cond_branch**(3) or
+    **pt_qry_indirect_branch**(3). This usually means that execution flow
+    reconstruction and trace got out of sync.
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_cond_branch**(3), **pt_qry_indirect_branch**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_get_offset.3.md b/doc/man/pt_qry_get_offset.3.md
new file mode 100644
index 0000000..199122d
--- /dev/null
+++ b/doc/man/pt_qry_get_offset.3.md
@@ -0,0 +1,83 @@
+% PT_QRY_GET_OFFSET(3)
+
+
+
+# NAME
+
+pt_qry_get_offset, pt_qry_get_sync_offset - get an Intel(R) Processor Trace
+query decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_get_offset(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_qry_get_sync_offset(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_get_offset**() provides *decoder*'s current position as offset in bytes
+from the beginning of *decoder*'s trace buffer in the unsigned integer variable
+pointed to by *offset*.
+
+**pt_qry_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+:   The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+:   *decoder* has not been synchronized onto the trace stream. Use
+    **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+    **pt_qry_sync_set**(3) to synchronize *decoder*.
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3),
+**pt_qry_sync_set**(3), **pt_qry_get_config**(3), **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_sync_forward.3.md b/doc/man/pt_qry_sync_forward.3.md
new file mode 100644
index 0000000..4c9395c
--- /dev/null
+++ b/doc/man/pt_qry_sync_forward.3.md
@@ -0,0 +1,152 @@
+% PT_QRY_SYNC_FORWARD(3)
+
+
+
+# NAME
+
+pt_qry_sync_forward, pt_qry_sync_backward, pt_qry_sync_set - synchronize an
+Intel(R) Processor Trace query decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_sync_forward(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+| **int pt_qry_sync_backward(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+| **int pt_qry_sync_set(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*, uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions synchronize an Intel Processor Trace (Intel PT) query decoder
+pointed to by *decoder* onto the trace stream in *decoder*'s trace buffer.
+
+They search for a Packet Stream Boundary (PSB) packet in the trace stream and,
+if successful, set *decoder*'s current position and synchronization position to
+that packet and start processing packets. For synchronization to be
+successful, there must be a full PSB+ header in the trace stream.
+ +If the *ip* argument is not NULL, these functions provide the code memory +address at which tracing starts in the variable pointed to by *ip*. If tracing +is disabled at the synchronization point, the lack of an IP is indicated in the +return value by setting the *pts_ip_suppressed* bit. + +**pt_qry_sync_forward**() searches in forward direction from *decoder*'s current +position towards the end of the trace buffer. If *decoder* has been newly +allocated and has not been synchronized yet, the search starts from the +beginning of the trace. + +**pt_qry_sync_backward**() searches in backward direction from *decoder*'s +current position towards the beginning of the trace buffer. If *decoder* has +been newly allocated and has not been synchronized yet, the search starts from +the end of the trace. + +**pt_qry_sync_set**() searches at *offset* bytes from the beginning of its trace +buffer. + + +# RETURN VALUE + +All synchronization functions return zero or a positive value on success or a +negative *pt_error_code* enumeration constant in case of an error. + +On success, a bit-vector of *pt_status_flag* enumeration constants is returned. +The *pt_status_flag* enumeration is declared as: + +~~~{.c} +/** Decoder status flags. */ +enum pt_status_flag { + /** There is an event pending. */ + pts_event_pending = 1 << 0, + + /** The address has been suppressed. */ + pts_ip_suppressed = 1 << 1, + + /** There is no more trace data available. */ + pts_eos = 1 << 2 +}; +~~~ + + +# ERRORS + +pte_invalid +: The *decoder* argument is NULL. + +pte_eos +: There is no (further) PSB+ header in the trace stream + (**pt_qry_sync_forward**() and **pt_qry_sync_backward**()) or at *offset* + bytes into the trace buffer (**pt_qry_sync_set**()). + +pte_nosync +: There is no PSB packet at *offset* bytes from the beginning of the trace + (**pt_qry_sync_set**() only). + +pte_bad_opc +: The decoder encountered an unsupported Intel PT packet opcode. + +pte_bad_packet +: The decoder encountered an unsupported Intel PT packet payload. 
+ + +# EXAMPLE + +The following example re-synchronizes an Intel PT query decoder after decode +errors: + +~~~{.c} +int foo(struct pt_query_decoder *decoder) { + for (;;) { + int errcode; + + errcode = pt_qry_sync_forward(decoder, NULL); + if (errcode < 0) + return errcode; + + do { + errcode = decode(decoder); /* the application's decode loop */ + } while (errcode >= 0); + } +} +~~~ + + +# SEE ALSO + +**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3), +**pt_qry_get_offset**(3), **pt_qry_get_sync_offset**(3), +**pt_qry_get_config**(3), **pt_qry_cond_branch**(3), +**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3), +**pt_qry_core_bus_ratio**(3) diff --git a/doc/man/pt_qry_time.3.md b/doc/man/pt_qry_time.3.md new file mode 100644 index 0000000..f3676f6 --- /dev/null +++ b/doc/man/pt_qry_time.3.md @@ -0,0 +1,138 @@ +% PT_QRY_TIME(3) + + + +# NAME + +pt_qry_time, pt_qry_core_bus_ratio, pt_insn_time, pt_insn_core_bus_ratio, +pt_blk_time, pt_blk_core_bus_ratio - query an Intel(R) Processor Trace decoder +for timing information + + +# SYNOPSIS + +| **\#include `<intel-pt.h>`** +| +| **int pt_qry_time(struct pt_query_decoder \**decoder*, uint64_t \**time*,** +| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);** +| **int pt_qry_core_bus_ratio(struct pt_query_decoder \**decoder*,** +| **uint32_t \**cbr*);** +| +| **int pt_insn_time(struct pt_insn_decoder \**decoder*, uint64_t \**time*,** +| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);** +| **int pt_insn_core_bus_ratio(struct pt_insn_decoder \**decoder*,** +| **uint32_t \**cbr*);** +| +| **int pt_blk_time(struct pt_block_decoder \**decoder*, uint64_t \**time*,** +| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);** +| **int pt_blk_core_bus_ratio(struct pt_block_decoder \**decoder*,** +| **uint32_t \**cbr*);** + +Link with *-lipt*. + + +# DESCRIPTION + +**pt_qry_time**(), **pt_insn_time**(), and **pt_blk_time**() provide the current +estimated timestamp count (TSC) value in the unsigned integer variable pointed +to by the *time* argument. The returned value corresponds to what an **rdtsc** +instruction would have returned. + +At configurable intervals, Intel PT contains the full, accurate TSC value. +Between those intervals, the timestamp count is estimated using a collection of +lower-bandwidth packets, the Mini Time Counter (MTC) packet and the Cycle Count +Packet (CYC). Depending on the Intel PT configuration, timing can be very +precise at the cost of increased bandwidth or less precise but requiring lower +bandwidth. + +The decoder needs to be calibrated in order to translate Cycle Counter ticks +into Core Crystal Clock ticks. Without calibration, CYC packets need to be +dropped. The decoder calibrates itself using MTC, CYC, and CBR packets. + +To interpret MTC and CYC packets, the decoder needs additional information +provided in respective fields in the *pt_config* structure. Lacking this +information, MTC packets may need to be dropped. This will impact the precision +of the estimated timestamp count by losing periodic updates and it will impact +calibration, which may result in reduced precision for cycle-accurate timing. + +The number of dropped MTC and CYC packets gives a rough idea about the quality +of the estimated timestamp count. The number of dropped MTC and CYC packets is +given in the unsigned integer variables pointed to by the *lost_mtc* and +*lost_cyc* arguments respectively. If one or both of the arguments is NULL, no +information on lost packets is provided for the respective packet type.
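For illustration only (this sketch is not part of the original page), the estimated timestamp count and the lost-packet counters might be queried together as shown below; the helper name report_time() and the decision to treat -pte_no_time as non-fatal are assumptions of this example:

~~~{.c}
#include <intel-pt.h>
#include <inttypes.h>
#include <stdio.h>

/* Print the estimated TSC value and the number of lost MTC/CYC packets. */
static int report_time(struct pt_query_decoder *decoder)
{
	uint64_t tsc;
	uint32_t lost_mtc, lost_cyc;
	int errcode;

	errcode = pt_qry_time(decoder, &tsc, &lost_mtc, &lost_cyc);
	if (errcode < 0 && errcode != -pte_no_time)
		return errcode;

	/* On -pte_no_time, tsc holds a relative estimate; it is zero if no
	 * timing packet has been seen, yet.
	 */
	printf("tsc: 0x%" PRIx64 " (lost %" PRIu32 " MTC, %" PRIu32 " CYC)\n",
	       tsc, lost_mtc, lost_cyc);

	return 0;
}
~~~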
+ +**pt_qry_core_bus_ratio**(), **pt_insn_core_bus_ratio**(), and +**pt_blk_core_bus_ratio**() give the last known core:bus ratio as provided by +the Core Bus Ratio (CBR) Intel PT packet. + + +# RETURN VALUE + +All functions return zero on success or a negative *pt_error_code* enumeration +constant in case of an error. + + +# ERRORS + +pte_invalid +: The *decoder* or *time* (**pt_qry_time**(), **pt_insn_time**(), and + **pt_blk_time**()) or *cbr* (**pt_qry_core_bus_ratio**(), + **pt_insn_core_bus_ratio**(), and **pt_blk_core_bus_ratio**()) argument is + NULL. + +pte_no_time +: There has not been a TSC packet to provide the full, accurate Time Stamp + Count. There may have been MTC or CYC packets, so the provided *time* may + be non-zero. It is zero if there has not been any timing packet yet. + + Depending on the Intel PT configuration, TSC packets may not have been + enabled. In this case, the *time* value provides the relative time based on + other timing packets. + +pte_no_cbr +: There has not been a CBR packet to provide the core:bus ratio. The *cbr* + value is undefined in this case. + + +# NOTES + +All decoders read ahead. The estimated timestamp count and core:bus ratios +correspond to their current decode position, which may be ahead of the trace +position that matches the last event, instruction, or block. + +The query decoder also provides an estimated timestamp count in the *pt_event* +structure. + + +# SEE ALSO + +**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3), +**pt_qry_cond_branch**(3), **pt_qry_indirect_branch**(3), **pt_qry_event**(3), +**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), **pt_insn_next**(3), +**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), **pt_blk_next**(3) diff --git a/include/posix/threads.h b/include/posix/threads.h new file mode 100644 index 0000000..46629e5 --- /dev/null +++ b/include/posix/threads.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * + * It looks like there is still no support for C11's threads.h. 
+ * + * We implement the few features we actually need hoping that this file will + * soon go away. + */ + +#ifndef THREADS_H +#define THREADS_H + +#include + +#ifndef PTHREAD_MUTEX_NORMAL +# define PTHREAD_MUTEX_NORMAL PTHREAD_MUTEX_TIMED_NP +#endif + +#include +#include + +enum { + thrd_success = 1, + thrd_error +}; + +struct pt_thread { + pthread_t thread; +}; +typedef struct pt_thread thrd_t; + +typedef int (*thrd_start_t)(void *); + + +struct thrd_args { + thrd_start_t fun; + void *arg; +}; + +static void *thrd_routine(void *arg) +{ + struct thrd_args *args; + int result; + + args = arg; + if (!args) + return (void *) (intptr_t) -1; + + result = -1; + if (args->fun) + result = args->fun(args->arg); + + free(args); + + return (void *) (intptr_t) result; +} + +static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg) +{ + struct thrd_args *args; + int errcode; + + if (!thrd || !fun) + return thrd_error; + + args = malloc(sizeof(*args)); + if (!args) + return thrd_error; + + args->fun = fun; + args->arg = arg; + + errcode = pthread_create(&thrd->thread, NULL, thrd_routine, args); + if (errcode) { + free(args); + return thrd_error; + } + + return thrd_success; +} + +static inline int thrd_join(thrd_t *thrd, int *res) +{ + void *result; + int errcode; + + if (!thrd) + return thrd_error; + + errcode = pthread_join(thrd->thread, &result); + if (errcode) + return thrd_error; + + if (res) + *res = (int) (intptr_t) result; + + return thrd_success; +} + + +struct pt_mutex { + pthread_mutex_t mutex; +}; +typedef struct pt_mutex mtx_t; + +enum { + mtx_plain = PTHREAD_MUTEX_NORMAL +}; + +static inline int mtx_init(mtx_t *mtx, int type) +{ + int errcode; + + if (!mtx || type != mtx_plain) + return thrd_error; + + errcode = pthread_mutex_init(&mtx->mutex, NULL); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline void mtx_destroy(mtx_t *mtx) +{ + if (mtx) + (void) pthread_mutex_destroy(&mtx->mutex); +} + +static inline int mtx_lock(mtx_t *mtx) +{ + int errcode; + + if (!mtx) + return thrd_error; + + errcode = pthread_mutex_lock(&mtx->mutex); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline int mtx_unlock(mtx_t *mtx) +{ + int errcode; + + if (!mtx) + return thrd_error; + + errcode = pthread_mutex_unlock(&mtx->mutex); + if (errcode) + return thrd_error; + + return thrd_success; +} + + +struct pt_cond { + pthread_cond_t cond; +}; +typedef struct pt_cond cnd_t; + +static inline int cnd_init(cnd_t *cnd) +{ + int errcode; + + if (!cnd) + return thrd_error; + + errcode = pthread_cond_init(&cnd->cond, NULL); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline int cnd_destroy(cnd_t *cnd) +{ + int errcode; + + if (!cnd) + return thrd_error; + + errcode = pthread_cond_destroy(&cnd->cond); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline int cnd_signal(cnd_t *cnd) +{ + int errcode; + + if (!cnd) + return thrd_error; + + errcode = pthread_cond_signal(&cnd->cond); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline int cnd_broadcast(cnd_t *cnd) +{ + int errcode; + + if (!cnd) + return thrd_error; + + errcode = pthread_cond_broadcast(&cnd->cond); + if (errcode) + return thrd_error; + + return thrd_success; +} + +static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx) +{ + int errcode; + + if (!cnd || !mtx) + return thrd_error; + + errcode = pthread_cond_wait(&cnd->cond, &mtx->mutex); + if (errcode) + return thrd_error; + + return thrd_success; +} + +#endif 
/* THREADS_H */ diff --git a/include/windows/inttypes.h b/include/windows/inttypes.h new file mode 100644 index 0000000..399011c --- /dev/null +++ b/include/windows/inttypes.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#ifndef PRId64 +# define PRId64 "lld" +#endif +#ifndef PRIu64 +# define PRIu64 "llu" +#endif +#ifndef PRIx64 +# define PRIx64 "llx" +#endif + +#ifndef PRId32 +# define PRId32 "d" +#endif +#ifndef PRIu32 +# define PRIu32 "u" +#endif +#ifndef PRIx32 +# define PRIx32 "x" +#endif + +#ifndef PRIu16 +# define PRIu16 "u" +#endif + +#ifndef PRIu8 +# define PRIu8 "u" +#endif +#ifndef PRIx8 +# define PRIx8 "x" +#endif + +#ifndef SCNx64 +# define SCNx64 "llx" +#endif diff --git a/include/windows/threads.h b/include/windows/threads.h new file mode 100644 index 0000000..48b4130 --- /dev/null +++ b/include/windows/threads.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * + * It looks like there is still no support for C11's threads.h. + * + * We implement the few features we actually need hoping that this file will + * soon go away. + */ + +#ifndef THREADS_H +#define THREADS_H + +#include "windows.h" + + +enum { + thrd_success = 1, + thrd_error +}; + + +struct pt_thread { + HANDLE handle; +}; +typedef struct pt_thread thrd_t; + +typedef int (*thrd_start_t)(void *); + + +struct thrd_args { + thrd_start_t fun; + void *arg; +}; + +static DWORD WINAPI thrd_routine(void *arg) +{ + struct thrd_args *args; + int result; + + args = (struct thrd_args *) arg; + if (!args) + return (DWORD) -1; + + result = -1; + if (args->fun) + result = args->fun(args->arg); + + free(args); + + return (DWORD) result; +} + +static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg) +{ + struct thrd_args *args; + HANDLE handle; + + if (!thrd || !fun) + return thrd_error; + + args = malloc(sizeof(*args)); + if (!args) + return thrd_error; + + args->fun = fun; + args->arg = arg; + + handle = CreateThread(NULL, 0, thrd_routine, args, 0, NULL); + if (!handle) { + free(args); + return thrd_error; + } + + thrd->handle = handle; + return thrd_success; +} + +static inline int thrd_join(thrd_t *thrd, int *res) +{ + DWORD status; + BOOL success; + + if (!thrd) + return thrd_error; + + status = WaitForSingleObject(thrd->handle, INFINITE); + if (status) + return thrd_error; + + if (res) { + DWORD result; + + success = GetExitCodeThread(thrd->handle, &result); + if (!success) { + (void) CloseHandle(thrd->handle); + return thrd_error; + } + + *res = (int) result; + } + + success = CloseHandle(thrd->handle); + if (!success) + return thrd_error; + + return thrd_success; +} + +struct pt_mutex { + CRITICAL_SECTION cs; +}; +typedef struct pt_mutex mtx_t; + +enum { + mtx_plain +}; + +static inline int mtx_init(mtx_t *mtx, int type) +{ + if (!mtx || type != mtx_plain) + return thrd_error; + + InitializeCriticalSection(&mtx->cs); + + return thrd_success; +} + +static inline void mtx_destroy(mtx_t *mtx) +{ + if (mtx) + DeleteCriticalSection(&mtx->cs); +} + +static inline int mtx_lock(mtx_t *mtx) +{ + if (!mtx) + return thrd_error; + + EnterCriticalSection(&mtx->cs); + + return thrd_success; +} + +static inline int mtx_unlock(mtx_t *mtx) +{ + if (!mtx) + return thrd_error; + + LeaveCriticalSection(&mtx->cs); + + return thrd_success; +} + + +struct pt_cond { + CONDITION_VARIABLE cond; +}; +typedef struct pt_cond cnd_t; + +static inline int cnd_init(cnd_t *cnd) +{ + if (!cnd) + return thrd_error; + + InitializeConditionVariable(&cnd->cond); + + return thrd_success; +} + +static inline int cnd_destroy(cnd_t *cnd) +{ + if (!cnd) + return thrd_error; + + /* Nothing to do. 
*/ + + return thrd_success; +} + +static inline int cnd_signal(cnd_t *cnd) +{ + if (!cnd) + return thrd_error; + + WakeConditionVariable(&cnd->cond); + + return thrd_success; +} + +static inline int cnd_broadcast(cnd_t *cnd) +{ + if (!cnd) + return thrd_error; + + WakeAllConditionVariable(&cnd->cond); + + return thrd_success; +} + +static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx) +{ + BOOL success; + + if (!cnd || !mtx) + return thrd_error; + + success = SleepConditionVariableCS(&cnd->cond, &mtx->cs, INFINITE); + if (!success) + return thrd_error; + + return thrd_success; +} + +#endif /* THREADS_H */ diff --git a/libipt/CMakeLists.txt b/libipt/CMakeLists.txt new file mode 100644 index 0000000..146f8f5 --- /dev/null +++ b/libipt/CMakeLists.txt @@ -0,0 +1,171 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
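A brief usage sketch for the minimal C11-threads shim defined in the posix and windows threads.h headers above; the worker() function, the shared counter, and main() are invented for this illustration, and the shim header is assumed to be on the include path:

~~~{.c}
#include "threads.h"	/* the shim above; resolves to the posix or windows variant */

#include <stdio.h>

static mtx_t lock;
static int counter;

/* A thrd_start_t function: returns an int that thrd_join() reports back. */
static int worker(void *arg)
{
	(void) arg;

	if (mtx_lock(&lock) != thrd_success)
		return -1;

	counter += 1;

	if (mtx_unlock(&lock) != thrd_success)
		return -1;

	return 0;
}

int main(void)
{
	thrd_t thread;
	int result;

	if (mtx_init(&lock, mtx_plain) != thrd_success)
		return 1;

	if (thrd_create(&thread, worker, NULL) != thrd_success)
		return 1;

	if (thrd_join(&thread, &result) != thrd_success)
		return 1;

	printf("worker returned %d, counter = %d\n", result, counter);

	mtx_destroy(&lock);
	return 0;
}
~~~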
+ +include_directories( + internal/include +) + +set(LIBIPT_SECTION_FILES + src/pt_section.c + src/pt_section_file.c +) + +set(LIBIPT_FILES + src/pt_error.c + src/pt_packet_decoder.c + src/pt_query_decoder.c + src/pt_encoder.c + src/pt_sync.c + src/pt_version.c + src/pt_last_ip.c + src/pt_tnt_cache.c + src/pt_ild.c + src/pt_image.c + src/pt_image_section_cache.c + src/pt_retstack.c + src/pt_insn_decoder.c + src/pt_time.c + src/pt_asid.c + src/pt_event_queue.c + src/pt_packet.c + src/pt_decoder_function.c + src/pt_config.c + src/pt_insn.c + src/pt_block_decoder.c + src/pt_block_cache.c +) + +if (CMAKE_HOST_UNIX) + include_directories( + internal/include/posix + ) + + set(LIBIPT_FILES ${LIBIPT_FILES} src/posix/init.c) + set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/posix/pt_section_posix.c) +endif (CMAKE_HOST_UNIX) + +if (CMAKE_HOST_WIN32) + add_definitions( + # export libipt symbols + # + /Dpt_export=__declspec\(dllexport\) + ) + + include_directories( + internal/include/windows + ) + + set(LIBIPT_FILES ${LIBIPT_FILES} src/windows/init.c) + set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/windows/pt_section_windows.c) +endif (CMAKE_HOST_WIN32) + +set(LIBIPT_FILES ${LIBIPT_FILES} ${LIBIPT_SECTION_FILES}) + +add_library(libipt SHARED + ${LIBIPT_FILES} +) + +# put the version into the intel-pt header +# +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/include/intel-pt.h.in + ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h +) + +set_target_properties(libipt PROPERTIES + PREFIX "" + PUBLIC_HEADER ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h + VERSION ${PT_VERSION} + SOVERSION ${PT_VERSION_MAJOR} +) + +install(TARGETS libipt + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} +) + + +function(add_ptunit_std_test name) + add_ptunit_c_test(${name} src/pt_${name}.c ${ARGN}) +endfunction(add_ptunit_std_test) + + +add_ptunit_std_test(last_ip) +add_ptunit_std_test(tnt_cache) +add_ptunit_std_test(retstack) +add_ptunit_std_test(ild) +add_ptunit_std_test(cpu) +add_ptunit_std_test(time) +add_ptunit_std_test(asid) +add_ptunit_std_test(event_queue) +add_ptunit_std_test(image src/pt_asid.c) +add_ptunit_std_test(sync src/pt_packet.c) +add_ptunit_std_test(config) +add_ptunit_std_test(image_section_cache) +add_ptunit_std_test(block_cache) + +add_ptunit_c_test(mapped_section src/pt_asid.c) +add_ptunit_c_test(query + src/pt_encoder.c + src/pt_last_ip.c + src/pt_packet_decoder.c + src/pt_sync.c + src/pt_tnt_cache.c + src/pt_time.c + src/pt_event_queue.c + src/pt_query_decoder.c + src/pt_packet.c + src/pt_decoder_function.c + src/pt_packet_decoder.c + src/pt_config.c + ${LIBIPT_SECTION_FILES} + src/pt_time.c + src/pt_block_cache.c +) +add_ptunit_c_test(section ${LIBIPT_SECTION_FILES}) +add_ptunit_c_test(section-file + test/src/ptunit-section.c + src/pt_section.c + src/pt_section_file.c +) +add_ptunit_c_test(packet + src/pt_encoder.c + src/pt_packet_decoder.c + src/pt_sync.c + src/pt_packet.c + src/pt_decoder_function.c + src/pt_config.c +) +add_ptunit_c_test(fetch + src/pt_decoder_function.c + src/pt_encoder.c + src/pt_config.c +) + +add_ptunit_cpp_test(cpp) +add_ptunit_libraries(cpp libipt) diff --git a/libipt/include/intel-pt.h.in b/libipt/include/intel-pt.h.in new file mode 100644 index 0000000..2a8f3df --- /dev/null +++ b/libipt/include/intel-pt.h.in @@ -0,0 +1,2394 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and 
binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef INTEL_PT_H +#define INTEL_PT_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Intel(R) Processor Trace (Intel PT) decoder library. + * + * This file is logically structured into the following sections: + * + * - Version + * - Opcodes + * - Errors + * - Configuration + * - Packet encoder / decoder + * - Query decoder + * - Traced image + * - Instruction flow decoder + * - Block decoder + */ + + + +struct pt_encoder; +struct pt_packet_decoder; +struct pt_query_decoder; +struct pt_insn_decoder; +struct pt_block_decoder; + + + +/* A macro to mark functions as exported. */ +#ifndef pt_export +# if defined(__GNUC__) +# define pt_export __attribute__((visibility("default"))) +# elif defined(_MSC_VER) +# define pt_export __declspec(dllimport) +# else +# error "unknown compiler" +# endif +#endif + + + +/* Version. */ + + +/** The header version. */ +#define LIBIPT_VERSION_MAJOR ${PT_VERSION_MAJOR} +#define LIBIPT_VERSION_MINOR ${PT_VERSION_MINOR} + +#define LIBIPT_VERSION ((LIBIPT_VERSION_MAJOR << 8) + LIBIPT_VERSION_MINOR) + + +/** The library version. */ +struct pt_version { + /** Major version number. */ + uint8_t major; + + /** Minor version number. */ + uint8_t minor; + + /** Reserved bits. */ + uint16_t reserved; + + /** Build number. */ + uint32_t build; + + /** Version extension. */ + const char *ext; +}; + + +/** Return the library version. */ +extern pt_export struct pt_version pt_library_version(); + + + +/* Opcodes. */ + + + +/** A one byte opcode. */ +enum pt_opcode { + pt_opc_pad = 0x00, + pt_opc_ext = 0x02, + pt_opc_psb = pt_opc_ext, + pt_opc_tip = 0x0d, + pt_opc_tnt_8 = 0x00, + pt_opc_tip_pge = 0x11, + pt_opc_tip_pgd = 0x01, + pt_opc_fup = 0x1d, + pt_opc_mode = 0x99, + pt_opc_tsc = 0x19, + pt_opc_mtc = 0x59, + pt_opc_cyc = 0x03, + + /* A free opcode to trigger a decode fault. */ + pt_opc_bad = 0xd9 +}; + +/** A one byte extension code for ext opcodes. 
*/ +enum pt_ext_code { + pt_ext_psb = 0x82, + pt_ext_tnt_64 = 0xa3, + pt_ext_pip = 0x43, + pt_ext_ovf = 0xf3, + pt_ext_psbend = 0x23, + pt_ext_cbr = 0x03, + pt_ext_tma = 0x73, + pt_ext_stop = 0x83, + pt_ext_vmcs = 0xc8, + pt_ext_ext2 = 0xc3, + + pt_ext_bad = 0x04 +}; + +/** A one byte extension 2 code for ext2 extension opcodes. */ +enum pt_ext2_code { + pt_ext2_mnt = 0x88, + + pt_ext2_bad = 0x00 +}; + +/** A one byte opcode mask. */ +enum pt_opcode_mask { + pt_opm_tip = 0x1f, + pt_opm_tnt_8 = 0x01, + pt_opm_tnt_8_shr = 1, + pt_opm_fup = pt_opm_tip, + + /* The bit mask for the compression bits in the opcode. */ + pt_opm_ipc = 0xe0, + + /* The shift right value for ipc bits. */ + pt_opm_ipc_shr = 5, + + /* The bit mask for the compression bits after shifting. */ + pt_opm_ipc_shr_mask = 0x7, + + /* Shift counts and masks for decoding the cyc packet. */ + pt_opm_cyc = 0x03, + pt_opm_cyc_ext = 0x04, + pt_opm_cyc_bits = 0xf8, + pt_opm_cyc_shr = 3, + pt_opm_cycx_ext = 0x01, + pt_opm_cycx_shr = 1 +}; + +/** The size of the various opcodes in bytes. */ +enum pt_opcode_size { + pt_opcs_pad = 1, + pt_opcs_tip = 1, + pt_opcs_tip_pge = 1, + pt_opcs_tip_pgd = 1, + pt_opcs_fup = 1, + pt_opcs_tnt_8 = 1, + pt_opcs_mode = 1, + pt_opcs_tsc = 1, + pt_opcs_mtc = 1, + pt_opcs_cyc = 1, + pt_opcs_psb = 2, + pt_opcs_psbend = 2, + pt_opcs_ovf = 2, + pt_opcs_pip = 2, + pt_opcs_tnt_64 = 2, + pt_opcs_cbr = 2, + pt_opcs_tma = 2, + pt_opcs_stop = 2, + pt_opcs_vmcs = 2, + pt_opcs_mnt = 3 +}; + +/** The psb magic payload. + * + * The payload is a repeating 2-byte pattern. + */ +enum pt_psb_pattern { + /* The high and low bytes in the pattern. */ + pt_psb_hi = pt_opc_psb, + pt_psb_lo = pt_ext_psb, + + /* Various combinations of the above parts. */ + pt_psb_lohi = pt_psb_lo | pt_psb_hi << 8, + pt_psb_hilo = pt_psb_hi | pt_psb_lo << 8, + + /* The repeat count of the payload, not including opc and ext. */ + pt_psb_repeat_count = 7, + + /* The size of the repeated pattern in bytes. */ + pt_psb_repeat_size = 2 +}; + +/** An execution mode. */ +enum pt_exec_mode { + ptem_unknown, + ptem_16bit, + ptem_32bit, + ptem_64bit +}; + +/** The payload details. */ +enum pt_payload { + /* The shift counts for post-processing the PIP payload. */ + pt_pl_pip_shr = 1, + pt_pl_pip_shl = 5, + + /* The size of a PIP payload in bytes. */ + pt_pl_pip_size = 6, + + /* The non-root bit in the first byte of the PIP payload. */ + pt_pl_pip_nr = 0x01, + + /* The size of a 8bit TNT packet's payload in bits. */ + pt_pl_tnt_8_bits = 8 - pt_opm_tnt_8_shr, + + /* The size of a 64bit TNT packet's payload in bytes. */ + pt_pl_tnt_64_size = 6, + + /* The size of a 64bit TNT packet's payload in bits. */ + pt_pl_tnt_64_bits = 48, + + /* The size of a TSC packet's payload in bytes and in bits. */ + pt_pl_tsc_size = 7, + pt_pl_tsc_bit_size = pt_pl_tsc_size * 8, + + /* The size of a CBR packet's payload in bytes. */ + pt_pl_cbr_size = 2, + + /* The size of a PSB packet's payload in bytes. */ + pt_pl_psb_size = pt_psb_repeat_count * pt_psb_repeat_size, + + /* The size of a MODE packet's payload in bytes. */ + pt_pl_mode_size = 1, + + /* The size of an IP packet's payload with update-16 compression. */ + pt_pl_ip_upd16_size = 2, + + /* The size of an IP packet's payload with update-32 compression. */ + pt_pl_ip_upd32_size = 4, + + /* The size of an IP packet's payload with update-48 compression. */ + pt_pl_ip_upd48_size = 6, + + /* The size of an IP packet's payload with sext-48 compression. 
*/ + pt_pl_ip_sext48_size = 6, + + /* The size of an IP packet's payload with full-ip compression. */ + pt_pl_ip_full_size = 8, + + /* Byte locations, sizes, and masks for processing TMA packets. */ + pt_pl_tma_size = 5, + pt_pl_tma_ctc_size = 2, + pt_pl_tma_ctc_bit_size = pt_pl_tma_ctc_size * 8, + pt_pl_tma_ctc_0 = 2, + pt_pl_tma_ctc_1 = 3, + pt_pl_tma_ctc_mask = (1 << pt_pl_tma_ctc_bit_size) - 1, + pt_pl_tma_fc_size = 2, + pt_pl_tma_fc_bit_size = 9, + pt_pl_tma_fc_0 = 5, + pt_pl_tma_fc_1 = 6, + pt_pl_tma_fc_mask = (1 << pt_pl_tma_fc_bit_size) - 1, + + /* The size of a MTC packet's payload in bytes and in bits. */ + pt_pl_mtc_size = 1, + pt_pl_mtc_bit_size = pt_pl_mtc_size * 8, + + /* A mask for the MTC payload bits. */ + pt_pl_mtc_mask = (1 << pt_pl_mtc_bit_size) - 1, + + /* The maximal payload size in bytes of a CYC packet. */ + pt_pl_cyc_max_size = 15, + + /* The size of a VMCS packet's payload in bytes. */ + pt_pl_vmcs_size = 5, + + /* The shift counts for post-processing the VMCS payload. */ + pt_pl_vmcs_shl = 12, + + /* The size of a MNT packet's payload in bytes. */ + pt_pl_mnt_size = 8 +}; + +/** Mode packet masks. */ +enum pt_mode_mask { + pt_mom_leaf = 0xe0, + pt_mom_leaf_shr = 5, + pt_mom_bits = 0x1f +}; + +/** Mode packet leaves. */ +enum pt_mode_leaf { + pt_mol_exec = 0x00, + pt_mol_tsx = 0x20 +}; + +/** Mode packet bits. */ +enum pt_mode_bit { + /* mode.exec */ + pt_mob_exec_csl = 0x01, + pt_mob_exec_csd = 0x02, + + /* mode.tsx */ + pt_mob_tsx_intx = 0x01, + pt_mob_tsx_abrt = 0x02 +}; + +/** The IP compression. */ +enum pt_ip_compression { + /* The bits encode the payload size and the encoding scheme. + * + * No payload. The IP has been suppressed. + */ + pt_ipc_suppressed = 0x0, + + /* Payload: 16 bits. Update last IP. */ + pt_ipc_update_16 = 0x01, + + /* Payload: 32 bits. Update last IP. */ + pt_ipc_update_32 = 0x02, + + /* Payload: 48 bits. Sign extend to full address. */ + pt_ipc_sext_48 = 0x03, + + /* Payload: 48 bits. Update last IP. */ + pt_ipc_update_48 = 0x04, + + /* Payload: 64 bits. Full address. */ + pt_ipc_full = 0x06 +}; + +/** The size of the various packets in bytes. 
*/ +enum pt_packet_size { + ptps_pad = pt_opcs_pad, + ptps_tnt_8 = pt_opcs_tnt_8, + ptps_mode = pt_opcs_mode + pt_pl_mode_size, + ptps_tsc = pt_opcs_tsc + pt_pl_tsc_size, + ptps_mtc = pt_opcs_mtc + pt_pl_mtc_size, + ptps_psb = pt_opcs_psb + pt_pl_psb_size, + ptps_psbend = pt_opcs_psbend, + ptps_ovf = pt_opcs_ovf, + ptps_pip = pt_opcs_pip + pt_pl_pip_size, + ptps_tnt_64 = pt_opcs_tnt_64 + pt_pl_tnt_64_size, + ptps_cbr = pt_opcs_cbr + pt_pl_cbr_size, + ptps_tip_supp = pt_opcs_tip, + ptps_tip_upd16 = pt_opcs_tip + pt_pl_ip_upd16_size, + ptps_tip_upd32 = pt_opcs_tip + pt_pl_ip_upd32_size, + ptps_tip_upd48 = pt_opcs_tip + pt_pl_ip_upd48_size, + ptps_tip_sext48 = pt_opcs_tip + pt_pl_ip_sext48_size, + ptps_tip_full = pt_opcs_tip + pt_pl_ip_full_size, + ptps_tip_pge_supp = pt_opcs_tip_pge, + ptps_tip_pge_upd16 = pt_opcs_tip_pge + pt_pl_ip_upd16_size, + ptps_tip_pge_upd32 = pt_opcs_tip_pge + pt_pl_ip_upd32_size, + ptps_tip_pge_upd48 = pt_opcs_tip_pge + pt_pl_ip_upd48_size, + ptps_tip_pge_sext48 = pt_opcs_tip_pge + pt_pl_ip_sext48_size, + ptps_tip_pge_full = pt_opcs_tip_pge + pt_pl_ip_full_size, + ptps_tip_pgd_supp = pt_opcs_tip_pgd, + ptps_tip_pgd_upd16 = pt_opcs_tip_pgd + pt_pl_ip_upd16_size, + ptps_tip_pgd_upd32 = pt_opcs_tip_pgd + pt_pl_ip_upd32_size, + ptps_tip_pgd_upd48 = pt_opcs_tip_pgd + pt_pl_ip_upd48_size, + ptps_tip_pgd_sext48 = pt_opcs_tip_pgd + pt_pl_ip_sext48_size, + ptps_tip_pgd_full = pt_opcs_tip_pgd + pt_pl_ip_full_size, + ptps_fup_supp = pt_opcs_fup, + ptps_fup_upd16 = pt_opcs_fup + pt_pl_ip_upd16_size, + ptps_fup_upd32 = pt_opcs_fup + pt_pl_ip_upd32_size, + ptps_fup_upd48 = pt_opcs_fup + pt_pl_ip_upd48_size, + ptps_fup_sext48 = pt_opcs_fup + pt_pl_ip_sext48_size, + ptps_fup_full = pt_opcs_fup + pt_pl_ip_full_size, + ptps_tma = pt_opcs_tma + pt_pl_tma_size, + ptps_stop = pt_opcs_stop, + ptps_vmcs = pt_opcs_vmcs + pt_pl_vmcs_size, + ptps_mnt = pt_opcs_mnt + pt_pl_mnt_size +}; + + + +/* Errors. */ + + + +/** Error codes. */ +enum pt_error_code { + /* No error. Everything is OK. */ + pte_ok, + + /* Internal decoder error. */ + pte_internal, + + /* Invalid argument. */ + pte_invalid, + + /* Decoder out of sync. */ + pte_nosync, + + /* Unknown opcode. */ + pte_bad_opc, + + /* Unknown payload. */ + pte_bad_packet, + + /* Unexpected packet context. */ + pte_bad_context, + + /* Decoder reached end of trace stream. */ + pte_eos, + + /* No packet matching the query to be found. */ + pte_bad_query, + + /* Decoder out of memory. */ + pte_nomem, + + /* Bad configuration. */ + pte_bad_config, + + /* There is no IP. */ + pte_noip, + + /* The IP has been suppressed. */ + pte_ip_suppressed, + + /* There is no memory mapped at the requested address. */ + pte_nomap, + + /* An instruction could not be decoded. */ + pte_bad_insn, + + /* No wall-clock time is available. */ + pte_no_time, + + /* No core:bus ratio available. */ + pte_no_cbr, + + /* Bad traced image. */ + pte_bad_image, + + /* A locking error. */ + pte_bad_lock, + + /* The requested feature is not supported. */ + pte_not_supported, + + /* The return address stack is empty. */ + pte_retstack_empty, + + /* A compressed return is not indicated correctly by a taken branch. */ + pte_bad_retcomp, + + /* The current decoder state does not match the state in the trace. */ + pte_bad_status_update, + + /* The trace did not contain an expected enabled event. */ + pte_no_enable, + + /* An event was ignored. */ + pte_event_ignored +}; + + +/** Decode a function return value into an pt_error_code. 
*/ +static inline enum pt_error_code pt_errcode(int status) +{ + return (status >= 0) ? pte_ok : (enum pt_error_code) -status; +} + +/** Return a human readable error string. */ +extern pt_export const char *pt_errstr(enum pt_error_code); + + + +/* Configuration. */ + + + +/** A cpu vendor. */ +enum pt_cpu_vendor { + pcv_unknown, + pcv_intel +}; + +/** A cpu identifier. */ +struct pt_cpu { + /** The cpu vendor. */ + enum pt_cpu_vendor vendor; + + /** The cpu family. */ + uint16_t family; + + /** The cpu model. */ + uint8_t model; + + /** The stepping. */ + uint8_t stepping; +}; + +/** A collection of Intel PT errata. */ +struct pt_errata { + /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain + * Unexpected Packets. + * + * Same as: SKD024. + * + * Some Intel Processor Trace packets should be issued only between + * TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE + * packet is generated it may be preceded by a PSB+ that incorrectly + * includes FUP and MODE.Exec packets. + */ + uint32_t bdm70:1; + + /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be + * Recorded Following a Transactional Abort. + * + * Use of Intel(R) Transactional Synchronization Extensions (Intel(R) + * TSX) may result in a transactional abort. If an abort occurs + * immediately following a branch instruction, an incorrect branch + * target may be logged in an LBR (Last Branch Record) or in an Intel(R) + * Processor Trace (Intel(R) PT) packet before the LBR or Intel PT + * packet produced by the abort. + */ + uint32_t bdm64:1; + + /** SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets. + * + * Under complex micro-architectural conditions, an Intel PT (Processor + * Trace) OVF (Overflow) packet may be issued after the first byte of a + * multi-byte CYC (Cycle Count) packet, instead of any remaining bytes + * of the CYC. + */ + uint32_t skd007:1; + + /** SKD022: VM Entry That Clears TraceEn May Generate a FUP. + * + * If VM entry clears Intel(R) PT (Intel Processor Trace) + * IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a + * FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet, + * Packet Generation Disable). VM entry can clear TraceEn if the + * VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR. + */ + uint32_t skd022:1; + + /** SKD010: Intel(R) PT FUP May be Dropped After OVF. + * + * Same as: SKD014. + * + * Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not + * be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP + * Packet, Packet Generation Enable). + */ + uint32_t skd010:1; + + /* Reserve a few bytes for the future. */ + uint32_t reserved[15]; +}; + +/** A collection of decoder-specific configuration flags. */ +struct pt_conf_flags { + /** The decoder variant. */ + union { + /** Flags for the block decoder. */ + struct { + /** End a block after a call instruction. */ + uint32_t end_on_call:1; + } block; + + /* Reserve a few bytes for future extensions. */ + uint32_t reserved[4]; + } variant; +}; + +/** An unknown packet. */ +struct pt_packet_unknown; + +/** An Intel PT decoder configuration. + */ +struct pt_config { + /** The size of the config structure in bytes. */ + size_t size; + + /** The trace buffer begin address. */ + uint8_t *begin; + + /** The trace buffer end address. */ + uint8_t *end; + + /** An optional callback for handling unknown packets. + * + * If \@callback is not NULL, it is called for any unknown opcode. + */ + struct { + /** The callback function. 
+ * + * It shall decode the packet at \@pos into \@unknown. + * It shall return the number of bytes read upon success. + * It shall return a negative pt_error_code otherwise. + * The below context is passed as \@context. + */ + int (*callback)(struct pt_packet_unknown *unknown, + const struct pt_config *config, + const uint8_t *pos, void *context); + + /** The user-defined context for this configuration. */ + void *context; + } decode; + + /** The cpu on which Intel PT has been recorded. */ + struct pt_cpu cpu; + + /** The errata to apply when encoding or decoding Intel PT. */ + struct pt_errata errata; + + /* The CTC frequency. + * + * This is only required if MTC packets have been enabled in + * IA32_RTIT_CTRL.MTCEn. + */ + uint32_t cpuid_0x15_eax, cpuid_0x15_ebx; + + /* The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq. + * + * This is only required if MTC packets have been enabled in + * IA32_RTIT_CTRL.MTCEn. + */ + uint8_t mtc_freq; + + /* The nominal frequency as defined in MSR_PLATFORM_INFO[15:8]. + * + * This is only required if CYC packets have been enabled in + * IA32_RTIT_CTRL.CYCEn. + * + * If zero, timing calibration will only be able to use MTC and CYC + * packets. + * + * If not zero, timing calibration will also be able to use CBR + * packets. + */ + uint8_t nom_freq; + + /** A collection of decoder-specific flags. */ + struct pt_conf_flags flags; +}; + + +/** Zero-initialize an Intel PT configuration. */ +static inline void pt_config_init(struct pt_config *config) +{ + memset(config, 0, sizeof(*config)); + + config->size = sizeof(*config); +} + +/** Determine errata for a given cpu. + * + * Updates \@errata based on \@cpu. + * + * Returns 0 on success, a negative error code otherwise. + * Returns -pte_invalid if \@errata or \@cpu is NULL. + */ +extern pt_export int pt_cpu_errata(struct pt_errata *errata, + const struct pt_cpu *cpu); + + + +/* Packet encoder / decoder. */ + + + +/* We define a few abbreviations outside of the below enum as we don't + * want to handle those in switches. + */ +enum { + ppt_ext = pt_opc_ext << 8, + ppt_ext2 = ppt_ext << 8 | pt_ext_ext2 << 8 +}; + +/** Intel PT packet types. */ +enum pt_packet_type { + /* 1-byte header packets. */ + ppt_pad = pt_opc_pad, + ppt_tip = pt_opc_tip, + ppt_tnt_8 = pt_opc_tnt_8 | 0xFE, + ppt_tip_pge = pt_opc_tip_pge, + ppt_tip_pgd = pt_opc_tip_pgd, + ppt_fup = pt_opc_fup, + ppt_mode = pt_opc_mode, + ppt_tsc = pt_opc_tsc, + ppt_mtc = pt_opc_mtc, + ppt_cyc = pt_opc_cyc, + + /* 2-byte header packets. */ + ppt_psb = ppt_ext | pt_ext_psb, + ppt_tnt_64 = ppt_ext | pt_ext_tnt_64, + ppt_pip = ppt_ext | pt_ext_pip, + ppt_stop = ppt_ext | pt_ext_stop, + ppt_ovf = ppt_ext | pt_ext_ovf, + ppt_psbend = ppt_ext | pt_ext_psbend, + ppt_cbr = ppt_ext | pt_ext_cbr, + ppt_tma = ppt_ext | pt_ext_tma, + ppt_vmcs = ppt_ext | pt_ext_vmcs, + + /* 3-byte header packets. */ + ppt_mnt = ppt_ext2 | pt_ext2_mnt, + + /* A packet decodable by the optional decoder callback. */ + ppt_unknown = 0x7ffffffe, + + /* An invalid packet. */ + ppt_invalid = 0x7fffffff +}; + +/** A TNT-8 or TNT-64 packet. */ +struct pt_packet_tnt { + /** TNT payload bit size. */ + uint8_t bit_size; + + /** TNT payload excluding stop bit. */ + uint64_t payload; +}; + +/** A packet with IP payload. */ +struct pt_packet_ip { + /** IP compression. */ + enum pt_ip_compression ipc; + + /** Zero-extended payload ip. */ + uint64_t ip; +}; + +/** A mode.exec packet. */ +struct pt_packet_mode_exec { + /** The mode.exec csl bit. */ + uint32_t csl:1; + + /** The mode.exec csd bit. 
*/ + uint32_t csd:1; +}; + +static inline enum pt_exec_mode +pt_get_exec_mode(const struct pt_packet_mode_exec *packet) +{ + if (packet->csl) + return packet->csd ? ptem_unknown : ptem_64bit; + else + return packet->csd ? ptem_32bit : ptem_16bit; +} + +static inline struct pt_packet_mode_exec +pt_set_exec_mode(enum pt_exec_mode mode) +{ + struct pt_packet_mode_exec packet; + + switch (mode) { + default: + packet.csl = 1; + packet.csd = 1; + break; + + case ptem_64bit: + packet.csl = 1; + packet.csd = 0; + break; + + case ptem_32bit: + packet.csl = 0; + packet.csd = 1; + break; + + case ptem_16bit: + packet.csl = 0; + packet.csd = 0; + break; + } + + return packet; +} + +/** A mode.tsx packet. */ +struct pt_packet_mode_tsx { + /** The mode.tsx intx bit. */ + uint32_t intx:1; + + /** The mode.tsx abrt bit. */ + uint32_t abrt:1; +}; + +/** A mode packet. */ +struct pt_packet_mode { + /** Mode leaf. */ + enum pt_mode_leaf leaf; + + /** Mode bits. */ + union { + /** Packet: mode.exec. */ + struct pt_packet_mode_exec exec; + + /** Packet: mode.tsx. */ + struct pt_packet_mode_tsx tsx; + } bits; +}; + +/** A PIP packet. */ +struct pt_packet_pip { + /** The CR3 value. */ + uint64_t cr3; + + /** The non-root bit. */ + uint32_t nr:1; +}; + +/** A TSC packet. */ +struct pt_packet_tsc { + /** The TSC value. */ + uint64_t tsc; +}; + +/** A CBR packet. */ +struct pt_packet_cbr { + /** The core/bus cycle ratio. */ + uint8_t ratio; +}; + +/** A TMA packet. */ +struct pt_packet_tma { + /** The crystal clock tick counter value. */ + uint16_t ctc; + + /** The fast counter value. */ + uint16_t fc; +}; + +/** A MTC packet. */ +struct pt_packet_mtc { + /** The crystal clock tick counter value. */ + uint8_t ctc; +}; + +/** A CYC packet. */ +struct pt_packet_cyc { + /** The cycle counter value. */ + uint64_t value; +}; + +/** A VMCS packet. */ +struct pt_packet_vmcs { + /* The VMCS Base Address (i.e. the shifted payload). */ + uint64_t base; +}; + +/** A MNT packet. */ +struct pt_packet_mnt { + /** The raw payload. */ + uint64_t payload; +}; + +/** An unknown packet decodable by the optional decoder callback. */ +struct pt_packet_unknown { + /** Pointer to the raw packet bytes. */ + const uint8_t *packet; + + /** Optional pointer to a user-defined structure. */ + void *priv; +}; + +/** An Intel PT packet. */ +struct pt_packet { + /** The type of the packet. + * + * This also determines the \@payload field. + */ + enum pt_packet_type type; + + /** The size of the packet including opcode and payload. */ + uint8_t size; + + /** Packet specific data. */ + union { + /** Packets: pad, ovf, psb, psbend, stop - no payload. */ + + /** Packet: tnt-8, tnt-64. */ + struct pt_packet_tnt tnt; + + /** Packet: tip, fup, tip.pge, tip.pgd. */ + struct pt_packet_ip ip; + + /** Packet: mode. */ + struct pt_packet_mode mode; + + /** Packet: pip. */ + struct pt_packet_pip pip; + + /** Packet: tsc. */ + struct pt_packet_tsc tsc; + + /** Packet: cbr. */ + struct pt_packet_cbr cbr; + + /** Packet: tma. */ + struct pt_packet_tma tma; + + /** Packet: mtc. */ + struct pt_packet_mtc mtc; + + /** Packet: cyc. */ + struct pt_packet_cyc cyc; + + /** Packet: vmcs. */ + struct pt_packet_vmcs vmcs; + + /** Packet: mnt. */ + struct pt_packet_mnt mnt; + + /** Packet: unknown. */ + struct pt_packet_unknown unknown; + } payload; +}; + + + +/* Packet encoder. */ + + + +/** Allocate an Intel PT packet encoder. 
+ * + * The encoder will work on the buffer defined in \@config, it shall contain + * raw trace data and remain valid for the lifetime of the encoder. + * + * The encoder starts at the beginning of the trace buffer. + */ +extern pt_export struct pt_encoder * +pt_alloc_encoder(const struct pt_config *config); + +/** Free an Intel PT packet encoder. + * + * The \@encoder must not be used after a successful return. + */ +extern pt_export void pt_free_encoder(struct pt_encoder *encoder); + +/** Hard set synchronization point of an Intel PT packet encoder. + * + * Synchronize \@encoder to \@offset within the trace buffer. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_eos if the given offset is behind the end of the trace buffer. + * Returns -pte_invalid if \@encoder is NULL. + */ +extern pt_export int pt_enc_sync_set(struct pt_encoder *encoder, + uint64_t offset); + +/** Get the current packet encoder position. + * + * Fills the current \@encoder position into \@offset. + * + * This is useful for reporting errors. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@encoder or \@offset is NULL. + */ +extern pt_export int pt_enc_get_offset(struct pt_encoder *encoder, + uint64_t *offset); + +/* Return a pointer to \@encoder's configuration. + * + * Returns a non-null pointer on success, NULL if \@encoder is NULL. + */ +extern pt_export const struct pt_config * +pt_enc_get_config(const struct pt_encoder *encoder); + +/** Encode an Intel PT packet. + * + * Writes \@packet at \@encoder's current position in the Intel PT buffer and + * advances the \@encoder beyond the written packet. + * + * The \@packet.size field is ignored. + * + * In case of errors, the \@encoder is not advanced and nothing is written + * into the Intel PT buffer. + * + * Returns the number of bytes written on success, a negative error code + * otherwise. + * + * Returns -pte_bad_opc if \@packet.type is not known. + * Returns -pte_bad_packet if \@packet's payload is invalid. + * Returns -pte_eos if \@encoder reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@encoder or \@packet is NULL. + */ +extern pt_export int pt_enc_next(struct pt_encoder *encoder, + const struct pt_packet *packet); + + + +/* Packet decoder. */ + + + +/** Allocate an Intel PT packet decoder. + * + * The decoder will work on the buffer defined in \@config, it shall contain + * raw trace data and remain valid for the lifetime of the decoder. + * + * The decoder needs to be synchronized before it can be used. + */ +extern pt_export struct pt_packet_decoder * +pt_pkt_alloc_decoder(const struct pt_config *config); + +/** Free an Intel PT packet decoder. + * + * The \@decoder must not be used after a successful return. + */ +extern pt_export void pt_pkt_free_decoder(struct pt_packet_decoder *decoder); + +/** Synchronize an Intel PT packet decoder. + * + * Search for the next synchronization point in forward or backward direction. + * + * If \@decoder has not been synchronized, yet, the search is started at the + * beginning of the trace buffer in case of forward synchronization and at the + * end of the trace buffer in case of backward synchronization. + * + * Returns zero or a positive value on success, a negative error code otherwise. + * + * Returns -pte_eos if no further synchronization point is found. + * Returns -pte_invalid if \@decoder is NULL. 
+ */ +extern pt_export int pt_pkt_sync_forward(struct pt_packet_decoder *decoder); +extern pt_export int pt_pkt_sync_backward(struct pt_packet_decoder *decoder); + +/** Hard set synchronization point of an Intel PT decoder. + * + * Synchronize \@decoder to \@offset within the trace buffer. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_eos if the given offset is behind the end of the trace buffer. + * Returns -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_pkt_sync_set(struct pt_packet_decoder *decoder, + uint64_t offset); + +/** Get the current decoder position. + * + * Fills the current \@decoder position into \@offset. + * + * This is useful for reporting errors. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_pkt_get_offset(struct pt_packet_decoder *decoder, + uint64_t *offset); + +/** Get the position of the last synchronization point. + * + * Fills the last synchronization position into \@offset. + * + * This is useful when splitting a trace stream for parallel decoding. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_pkt_get_sync_offset(struct pt_packet_decoder *decoder, + uint64_t *offset); + +/* Return a pointer to \@decoder's configuration. + * + * Returns a non-null pointer on success, NULL if \@decoder is NULL. + */ +extern pt_export const struct pt_config * +pt_pkt_get_config(const struct pt_packet_decoder *decoder); + +/** Decode the next packet and advance the decoder. + * + * Decodes the packet at \@decoder's current position into \@packet and + * adjusts the \@decoder's position by the number of bytes the packet had + * consumed. + * + * The \@size argument must be set to sizeof(struct pt_packet). + * + * Returns the number of bytes consumed on success, a negative error code + * otherwise. + * + * Returns -pte_bad_opc if the packet is unknown. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if \@decoder reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@packet is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_pkt_next(struct pt_packet_decoder *decoder, + struct pt_packet *packet, size_t size); + + + +/* Query decoder. */ + + + +/** Decoder status flags. */ +enum pt_status_flag { + /** There is an event pending. */ + pts_event_pending = 1 << 0, + + /** The address has been suppressed. */ + pts_ip_suppressed = 1 << 1, + + /** There is no more trace data available. */ + pts_eos = 1 << 2 +}; + +/** Event types. */ +enum pt_event_type { + /* Tracing has been enabled/disabled. */ + ptev_enabled, + ptev_disabled, + + /* Tracing has been disabled asynchronously. */ + ptev_async_disabled, + + /* An asynchronous branch, e.g. interrupt. */ + ptev_async_branch, + + /* A synchronous paging event. */ + ptev_paging, + + /* An asynchronous paging event. */ + ptev_async_paging, + + /* Trace overflow. */ + ptev_overflow, + + /* An execution mode change. */ + ptev_exec_mode, + + /* A transactional execution state change. */ + ptev_tsx, + + /* Trace Stop. */ + ptev_stop, + + /* A synchronous vmcs event. */ + ptev_vmcs, + + /* An asynchronous vmcs event. */ + ptev_async_vmcs +}; + +/** An event. 
*/ +struct pt_event { + /** The type of the event. */ + enum pt_event_type type; + + /** A flag indicating that the event IP has been suppressed. */ + uint32_t ip_suppressed:1; + + /** A flag indicating that the event is for status update. */ + uint32_t status_update:1; + + /** A flag indicating that the event has timing information. */ + uint32_t has_tsc:1; + + /** The time stamp count of the event. + * + * This field is only valid if \@has_tsc is set. + */ + uint64_t tsc; + + /** The number of lost mtc and cyc packets. + * + * This gives an idea about the quality of the \@tsc. The more packets + * were dropped, the less precise timing is. + */ + uint32_t lost_mtc; + uint32_t lost_cyc; + + /* Reserved space for future extensions. */ + uint64_t reserved[2]; + + /** Event specific data. */ + union { + /** Event: enabled. */ + struct { + /** The address at which tracing resumes. */ + uint64_t ip; + } enabled; + + /** Event: disabled. */ + struct { + /** The destination of the first branch inside a + * filtered area. + * + * This field is not valid if \@ip_suppressed is set. + */ + uint64_t ip; + + /* The exact source ip needs to be determined using + * disassembly and the filter configuration. + */ + } disabled; + + /** Event: async disabled. */ + struct { + /** The source address of the asynchronous branch that + * disabled tracing. + */ + uint64_t at; + + /** The destination of the first branch inside a + * filtered area. + * + * This field is not valid if \@ip_suppressed is set. + */ + uint64_t ip; + } async_disabled; + + /** Event: async branch. */ + struct { + /** The branch source address. */ + uint64_t from; + + /** The branch destination address. + * + * This field is not valid if \@ip_suppressed is set. + */ + uint64_t to; + } async_branch; + + /** Event: paging. */ + struct { + /** The updated CR3 value. + * + * The lower 5 bit have been zeroed out. + * The upper bits have been zeroed out depending on the + * maximum possible address. + */ + uint64_t cr3; + + /** A flag indicating whether the cpu is operating in + * vmx non-root (guest) mode. + */ + uint32_t non_root:1; + + /* The address at which the event is effective is + * obvious from the disassembly. + */ + } paging; + + /** Event: async paging. */ + struct { + /** The updated CR3 value. + * + * The lower 5 bit have been zeroed out. + * The upper bits have been zeroed out depending on the + * maximum possible address. + */ + uint64_t cr3; + + /** A flag indicating whether the cpu is operating in + * vmx non-root (guest) mode. + */ + uint32_t non_root:1; + + /** The address at which the event is effective. */ + uint64_t ip; + } async_paging; + + /** Event: overflow. */ + struct { + /** The address at which tracing resumes after overflow. + * + * This field is not valid, if ip_suppressed is set. + * In this case, the overflow resolved while tracing + * was disabled. + */ + uint64_t ip; + } overflow; + + /** Event: exec mode. */ + struct { + /** The execution mode. */ + enum pt_exec_mode mode; + + /** The address at which the event is effective. */ + uint64_t ip; + } exec_mode; + + /** Event: tsx. */ + struct { + /** The address at which the event is effective. + * + * This field is not valid if \@ip_suppressed is set. + */ + uint64_t ip; + + /** A flag indicating speculative execution mode. */ + uint32_t speculative:1; + + /** A flag indicating speculative execution aborts. */ + uint32_t aborted:1; + } tsx; + + /** Event: vmcs. */ + struct { + /** The VMCS base address. 
+ * + * The address is zero-extended with the lower 12 bits + * all zero. + */ + uint64_t base; + + /* The new VMCS base address should be stored and + * applied on subsequent VM entries. + */ + } vmcs; + + /** Event: async vmcs. */ + struct { + /** The VMCS base address. + * + * The address is zero-extended with the lower 12 bits + * all zero. + */ + uint64_t base; + + /** The address at which the event is effective. */ + uint64_t ip; + + /* An async paging event that binds to the same IP + * will always succeed this async vmcs event. + */ + } async_vmcs; + } variant; +}; + + +/** Allocate an Intel PT query decoder. + * + * The decoder will work on the buffer defined in \@config, it shall contain + * raw trace data and remain valid for the lifetime of the decoder. + * + * The decoder needs to be synchronized before it can be used. + */ +extern pt_export struct pt_query_decoder * +pt_qry_alloc_decoder(const struct pt_config *config); + +/** Free an Intel PT query decoder. + * + * The \@decoder must not be used after a successful return. + */ +extern pt_export void pt_qry_free_decoder(struct pt_query_decoder *decoder); + +/** Synchronize an Intel PT query decoder. + * + * Search for the next synchronization point in forward or backward direction. + * + * If \@decoder has not been synchronized, yet, the search is started at the + * beginning of the trace buffer in case of forward synchronization and at the + * end of the trace buffer in case of backward synchronization. + * + * If \@ip is not NULL, set it to last ip. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if no further synchronization point is found. + * Returns -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_qry_sync_forward(struct pt_query_decoder *decoder, + uint64_t *ip); +extern pt_export int pt_qry_sync_backward(struct pt_query_decoder *decoder, + uint64_t *ip); + +/** Manually synchronize an Intel PT query decoder. + * + * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB + * packet at \@offset. + * + * If \@ip is not NULL, set it to last ip. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer. + * Returns -pte_eos if \@decoder reaches the end of its trace buffer. + * Returns -pte_invalid if \@decoder is NULL. + * Returns -pte_nosync if there is no syncpoint at \@offset. + */ +extern pt_export int pt_qry_sync_set(struct pt_query_decoder *decoder, + uint64_t *ip, uint64_t offset); + +/** Get the current decoder position. + * + * Fills the current \@decoder position into \@offset. + * + * This is useful for reporting errors. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_qry_get_offset(struct pt_query_decoder *decoder, + uint64_t *offset); + +/** Get the position of the last synchronization point. + * + * Fills the last synchronization position into \@offset. + * + * This is useful for splitting a trace stream for parallel decoding. 
+ * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_qry_get_sync_offset(struct pt_query_decoder *decoder, + uint64_t *offset); + +/* Return a pointer to \@decoder's configuration. + * + * Returns a non-null pointer on success, NULL if \@decoder is NULL. + */ +extern pt_export const struct pt_config * +pt_qry_get_config(const struct pt_query_decoder *decoder); + +/** Query whether the next unconditional branch has been taken. + * + * On success, provides 1 (taken) or 0 (not taken) in \@taken for the next + * conditional branch and updates \@decoder. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_bad_query if no conditional branch is found. + * Returns -pte_eos if decoding reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@taken is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_qry_cond_branch(struct pt_query_decoder *decoder, + int *taken); + +/** Get the next indirect branch destination. + * + * On success, provides the linear destination address of the next indirect + * branch in \@ip and updates \@decoder. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_bad_query if no indirect branch is found. + * Returns -pte_eos if decoding reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@ip is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_qry_indirect_branch(struct pt_query_decoder *decoder, + uint64_t *ip); + +/** Query the next pending event. + * + * On success, provides the next event \@event and updates \@decoder. + * + * The \@size argument must be set to sizeof(struct pt_event). + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_bad_query if no event is found. + * Returns -pte_eos if decoding reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@event is NULL. + * Returns -pte_invalid if \@size is too small. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_qry_event(struct pt_query_decoder *decoder, + struct pt_event *event, size_t size); + +/** Query the current time. + * + * On success, provides the time at \@decoder's current position in \@time. + * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the time for that branch or event. + * + * The time is similar to what a rdtsc instruction would return. Depending + * on the configuration, the time may not be fully accurate. If TSC is not + * enabled, the time is relative to the last synchronization and can't be used + * to correlate with other TSC-based time sources. In this case, -pte_no_time + * is returned and the relative time is provided in \@time. 
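The conditional branch, indirect branch, and event queries above are normally driven off the pt_status_flag bit-vector returned by the previous call. The following sketch shows the event-draining part of that loop; the pts_event_pending status bit and the ptev_* event types are assumed from their definitions earlier in this header.

#include "intel-pt.h"

/* Process all events the decoder reports as pending.
 *
 * @status is the non-negative pt_status_flag bit-vector returned by the
 * previous query or by pt_qry_sync_forward().
 *
 * Returns the status of the last query, a negative error code otherwise.
 */
static int drain_events(struct pt_query_decoder *decoder, int status)
{
        while (status & pts_event_pending) {
                struct pt_event event;

                status = pt_qry_event(decoder, &event, sizeof(event));
                if (status < 0)
                        return status;

                switch (event.type) {
                case ptev_enabled:
                        /* Tracing starts or resumes at
                         * event.variant.enabled.ip.
                         */
                        break;

                case ptev_overflow:
                        /* Trace was lost; the caller needs to re-establish
                         * its state at event.variant.overflow.ip.
                         */
                        break;

                default:
                        break;
                }
        }

        return status;
}

After the pending events have been drained, the caller continues its own instruction decode and consults pt_qry_cond_branch() at conditional branches and pt_qry_indirect_branch() at indirect branches.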
+ * + * Some timing-related packets may need to be dropped (mostly due to missing + * calibration or incomplete configuration). To get an idea about the quality + * of the estimated time, we record the number of dropped MTC and CYC packets. + * + * If \@lost_mtc is not NULL, set it to the number of lost MTC packets. + * If \@lost_cyc is not NULL, set it to the number of lost CYC packets. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@time is NULL. + * Returns -pte_no_time if there has not been a TSC packet. + */ +extern pt_export int pt_qry_time(struct pt_query_decoder *decoder, + uint64_t *time, uint32_t *lost_mtc, + uint32_t *lost_cyc); + +/** Return the current core bus ratio. + * + * On success, provides the core:bus ratio at \@decoder's current position + * in \@cbr. + * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the core:bus ratio for that branch or event. + * + * The ratio is defined as core cycles per bus clock cycle. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@cbr is NULL. + * Returns -pte_no_cbr if there has not been a CBR packet. + */ +extern pt_export int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, + uint32_t *cbr); + + + +/* Traced image. */ + + + +/** An Intel PT address space identifier. + * + * This identifies a particular address space when adding file sections or + * when reading memory. + */ +struct pt_asid { + /** The size of this object - set to sizeof(struct pt_asid). */ + size_t size; + + /** The CR3 value. */ + uint64_t cr3; + + /** The VMCS Base address. */ + uint64_t vmcs; +}; + +/** An unknown CR3 value to be used for pt_asid objects. */ +static const uint64_t pt_asid_no_cr3 = 0xffffffffffffffffull; + +/** An unknown VMCS Base value to be used for pt_asid objects. */ +static const uint64_t pt_asid_no_vmcs = 0xffffffffffffffffull; + +/** Initialize an address space identifier. */ +static inline void pt_asid_init(struct pt_asid *asid) +{ + asid->size = sizeof(*asid); + asid->cr3 = pt_asid_no_cr3; + asid->vmcs = pt_asid_no_vmcs; +} + + +/** A cache of traced image sections. */ +struct pt_image_section_cache; + +/** Allocate a traced memory image section cache. + * + * An optional \@name may be given to the cache. The name string is copied. + * + * Returns a new traced memory image section cache on success, NULL otherwise. + */ +extern pt_export struct pt_image_section_cache * +pt_iscache_alloc(const char *name); + +/** Free a traced memory image section cache. + * + * The \@iscache must have been allocated with pt_iscache_alloc(). + * The \@iscache must not be used after a successful return. + */ +extern pt_export void pt_iscache_free(struct pt_image_section_cache *iscache); + +/** Get the image section cache name. + * + * Returns a pointer to \@iscache's name or NULL if there is no name. + */ +extern pt_export const char * +pt_iscache_name(const struct pt_image_section_cache *iscache); + +/** Add a new file section to the traced memory image section cache. + * + * Adds a new section consisting of \@size bytes starting at \@offset in + * \@filename loaded at the virtual address \@vaddr if \@iscache does not + * already contain such a section. + * + * Returns an image section identifier (isid) uniquely identifying that section + * in \@iscache. + * + * The section is silently truncated to match the size of \@filename. 
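A short sketch, using only the declarations above, of how an address-space identifier is usually narrowed down to a single process: initialize it to the unknown defaults, then fill in the fields that are actually known.

#include "intel-pt.h"

/* Identify an address space by its CR3 value only; the VMCS base stays at
 * pt_asid_no_vmcs and will not participate in comparisons.
 */
static void asid_for_process(struct pt_asid *asid, uint64_t cr3)
{
        pt_asid_init(asid);
        asid->cr3 = cr3;
}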
+ * + * Returns a positive isid on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@iscache or \@filename is NULL. + * Returns -pte_invalid if \@offset is too big. + */ +extern pt_export int pt_iscache_add_file(struct pt_image_section_cache *iscache, + const char *filename, uint64_t offset, + uint64_t size, uint64_t vaddr); + +/** Read memory from a cached file section + * + * Reads \@size bytes of memory starting at virtual address \@vaddr in the + * section identified by \@isid in \@iscache into \@buffer. + * + * The caller is responsible for allocating a \@buffer of at least \@size bytes. + * + * The read request may be truncated if it crosses section boundaries or if + * \@size is getting too big. We support reading at least 4Kbyte in one chunk + * unless the read would cross a section boundary. + * + * Returns the number of bytes read on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@iscache or \@buffer is NULL. + * Returns -pte_invalid if \@size is zero. + * Returns -pte_nomap if \@vaddr is not contained in section \@isid. + * Returns -pte_bad_image if \@iscache does not contain \@isid. + */ +extern pt_export int pt_iscache_read(struct pt_image_section_cache *iscache, + uint8_t *buffer, uint64_t size, int isid, + uint64_t vaddr); + +/** The traced memory image. */ +struct pt_image; + + +/** Allocate a traced memory image. + * + * An optional \@name may be given to the image. The name string is copied. + * + * Returns a new traced memory image on success, NULL otherwise. + */ +extern pt_export struct pt_image *pt_image_alloc(const char *name); + +/** Free a traced memory image. + * + * The \@image must have been allocated with pt_image_alloc(). + * The \@image must not be used after a successful return. + */ +extern pt_export void pt_image_free(struct pt_image *image); + +/** Get the image name. + * + * Returns a pointer to \@image's name or NULL if there is no name. + */ +extern pt_export const char *pt_image_name(const struct pt_image *image); + +/** Add a new file section to the traced memory image. + * + * Adds \@size bytes starting at \@offset in \@filename. The section is + * loaded at the virtual address \@vaddr in the address space \@asid. + * + * The \@asid may be NULL or (partially) invalid. In that case only the valid + * fields are considered when comparing with other address-spaces. Use this + * when tracing a single process or when adding sections to all processes. + * + * The section is silently truncated to match the size of \@filename. + * + * Existing sections that would overlap with the new section will be shrunk + * or split. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@image or \@filename is NULL. + * Returns -pte_invalid if \@offset is too big. + */ +extern pt_export int pt_image_add_file(struct pt_image *image, + const char *filename, uint64_t offset, + uint64_t size, + const struct pt_asid *asid, + uint64_t vaddr); + +/** Add a section from an image section cache. + * + * Add the section from \@iscache identified by \@isid in address space \@asid. + * + * Existing sections that would overlap with the new section will be shrunk + * or split. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_invalid if \@image or \@iscache is NULL. + * Returns -pte_bad_image if \@iscache does not contain \@isid. 
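A sketch of the section cache in use: add a file section once and read bytes back through the returned isid. The file name, file offset, section size, and load address below are made-up example values.

#include "intel-pt.h"

/* Cache 0x2000 bytes at file offset 0x400 of an example binary, loaded at
 * virtual address 0x401000, and read the first bytes back from the cache.
 */
static int cache_text_section(struct pt_image_section_cache *iscache)
{
        uint8_t buffer[16];
        int isid, read;

        isid = pt_iscache_add_file(iscache, "/opt/app/a.out",
                                   0x400ull, 0x2000ull, 0x401000ull);
        if (isid < 0)
                return isid;

        read = pt_iscache_read(iscache, buffer, sizeof(buffer), isid,
                               0x401000ull);
        if (read < 0)
                return read;

        return isid;
}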
+ */ +extern pt_export int pt_image_add_cached(struct pt_image *image, + struct pt_image_section_cache *iscache, + int isid, const struct pt_asid *asid); + +/** Copy an image. + * + * Adds all sections from \@src to \@image. Sections that could not be added + * will be ignored. + * + * Returns the number of ignored sections on success, a negative error code + * otherwise. + * + * Returns -pte_invalid if \@image or \@src is NULL. + */ +extern pt_export int pt_image_copy(struct pt_image *image, + const struct pt_image *src); + +/** Remove all sections loaded from a file. + * + * Removes all sections loaded from \@filename from the address space \@asid. + * Specify the same \@asid that was used for adding sections from \@filename. + * + * Returns the number of removed sections on success, a negative error code + * otherwise. + * + * Returns -pte_invalid if \@image or \@filename is NULL. + */ +extern pt_export int pt_image_remove_by_filename(struct pt_image *image, + const char *filename, + const struct pt_asid *asid); + +/** Remove all sections loaded into an address space. + * + * Removes all sections loaded into \@asid. Specify the same \@asid that was + * used for adding sections. + * + * Returns the number of removed sections on success, a negative error code + * otherwise. + * + * Returns -pte_invalid if \@image is NULL. + */ +extern pt_export int pt_image_remove_by_asid(struct pt_image *image, + const struct pt_asid *asid); + +/** A read memory callback function. + * + * It shall read \@size bytes of memory from address space \@asid starting + * at \@ip into \@buffer. + * + * It shall return the number of bytes read on success. + * It shall return a negative pt_error_code otherwise. + */ +typedef int (read_memory_callback_t)(uint8_t *buffer, size_t size, + const struct pt_asid *asid, + uint64_t ip, void *context); + +/** Set the memory callback for the traced memory image. + * + * Sets \@callback for reading memory. The callback is used for addresses + * that are not found in file sections. The \@context argument is passed + * to \@callback on each use. + * + * There can only be one callback at any time. A subsequent call will replace + * the previous callback. If \@callback is NULL, the callback is removed. + * + * Returns -pte_invalid if \@image is NULL. + */ +extern pt_export int pt_image_set_callback(struct pt_image *image, + read_memory_callback_t *callback, + void *context); + + + +/* Instruction flow decoder. */ + + + +/** The instruction class. + * + * We provide only a very coarse classification suitable for reconstructing + * the execution flow. + */ +enum pt_insn_class { + /* The instruction could not be classified. */ + ptic_error, + + /* The instruction is something not listed below. */ + ptic_other, + + /* The instruction is a near (function) call. */ + ptic_call, + + /* The instruction is a near (function) return. */ + ptic_return, + + /* The instruction is a near unconditional jump. */ + ptic_jump, + + /* The instruction is a near conditional jump. */ + ptic_cond_jump, + + /* The instruction is a call-like far transfer. + * E.g. SYSCALL, SYSENTER, or FAR CALL. + */ + ptic_far_call, + + /* The instruction is a return-like far transfer. + * E.g. SYSRET, SYSEXIT, IRET, or FAR RET. + */ + ptic_far_return, + + /* The instruction is a jump-like far transfer. + * E.g. FAR JMP. + */ + ptic_far_jump +}; + +/** The maximal size of an instruction. */ +enum { + pt_max_insn_size = 15 +}; + +/** A single traced instruction. 
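The read-memory callback above is the hook for everything that is not backed by a file section. Below is a sketch of one possible callback that serves reads from a flat memory snapshot; struct mem_snapshot is an invented example type, not part of the library, and pte_nomap is taken from the error codes defined earlier in this header.

#include "intel-pt.h"

#include <string.h>

/* An example memory snapshot used as the callback context. */
struct mem_snapshot {
        const uint8_t *data;
        uint64_t base;
        size_t size;
};

/* Serve reads that no file section covers from the snapshot. */
static int read_snapshot(uint8_t *buffer, size_t size,
                         const struct pt_asid *asid, uint64_t ip,
                         void *context)
{
        const struct mem_snapshot *snap = context;
        uint64_t offset;

        (void) asid;

        if (ip < snap->base || (ip - snap->base) >= snap->size)
                return -pte_nomap;

        offset = ip - snap->base;
        if (size > snap->size - offset)
                size = (size_t) (snap->size - offset);

        memcpy(buffer, snap->data + offset, size);
        return (int) size;
}

/* Register the callback; it is consulted for addresses that are not found
 * in @image's file sections.
 */
static int attach_snapshot(struct pt_image *image, struct mem_snapshot *snap)
{
        return pt_image_set_callback(image, read_snapshot, snap);
}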
*/ +struct pt_insn { + /** The virtual address in its process. */ + uint64_t ip; + + /** A coarse classification. */ + enum pt_insn_class iclass; + + /** The execution mode. */ + enum pt_exec_mode mode; + + /** The raw bytes. */ + uint8_t raw[pt_max_insn_size]; + + /** The size in bytes. */ + uint8_t size; + + /** A collection of flags giving additional information: + * + * - the instruction was executed speculatively. + */ + uint32_t speculative:1; + + /** - speculative execution was aborted after this instruction. */ + uint32_t aborted:1; + + /** - speculative execution was committed after this instruction. */ + uint32_t committed:1; + + /** - tracing was disabled after this instruction. */ + uint32_t disabled:1; + + /** - tracing was enabled at this instruction. */ + uint32_t enabled:1; + + /** - tracing was resumed at this instruction. + * + * In addition to tracing being enabled, it continues from the IP + * at which tracing had been disabled before. + */ + uint32_t resumed:1; + + /** - normal execution flow was interrupted after this instruction. */ + uint32_t interrupted:1; + + /** - tracing resumed at this instruction after an overflow. */ + uint32_t resynced:1; + + /** - tracing was stopped after this instruction. */ + uint32_t stopped:1; + + /** - this instruction is truncated in its image section. + * + * It starts in the image section identified by \@isid and continues + * in one or more other sections. + */ + uint32_t truncated:1; + + /** The image section identifier for the section containing this + * instruction. + * + * A value of zero means that the section did not have an identifier. + * The section was not added via an image section cache or the memory + * was read via the read memory callback. + */ + int isid; +}; + + +/** Allocate an Intel PT instruction flow decoder. + * + * The decoder will work on the buffer defined in \@config, it shall contain + * raw trace data and remain valid for the lifetime of the decoder. + * + * The decoder needs to be synchronized before it can be used. + */ +extern pt_export struct pt_insn_decoder * +pt_insn_alloc_decoder(const struct pt_config *config); + +/** Free an Intel PT instruction flow decoder. + * + * This will destroy the decoder's default image. + * + * The \@decoder must not be used after a successful return. + */ +extern pt_export void pt_insn_free_decoder(struct pt_insn_decoder *decoder); + +/** Synchronize an Intel PT instruction flow decoder. + * + * Search for the next synchronization point in forward or backward direction. + * + * If \@decoder has not been synchronized, yet, the search is started at the + * beginning of the trace buffer in case of forward synchronization and at the + * end of the trace buffer in case of backward synchronization. + * + * Returns zero or a positive value on success, a negative error code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if no further synchronization point is found. + * Returns -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_insn_sync_forward(struct pt_insn_decoder *decoder); +extern pt_export int pt_insn_sync_backward(struct pt_insn_decoder *decoder); + +/** Manually synchronize an Intel PT instruction flow decoder. + * + * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB + * packet at \@offset. + * + * Returns zero or a positive value on success, a negative error code otherwise. 
+ * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer. + * Returns -pte_eos if \@decoder reaches the end of its trace buffer. + * Returns -pte_invalid if \@decoder is NULL. + * Returns -pte_nosync if there is no syncpoint at \@offset. + */ +extern pt_export int pt_insn_sync_set(struct pt_insn_decoder *decoder, + uint64_t offset); + +/** Get the current decoder position. + * + * Fills the current \@decoder position into \@offset. + * + * This is useful for reporting errors. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_insn_get_offset(struct pt_insn_decoder *decoder, + uint64_t *offset); + +/** Get the position of the last synchronization point. + * + * Fills the last synchronization position into \@offset. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_insn_get_sync_offset(struct pt_insn_decoder *decoder, + uint64_t *offset); + +/** Get the traced image. + * + * The returned image may be modified as long as no decoder that uses this + * image is running. + * + * Returns a pointer to the traced image the decoder uses for reading memory. + * Returns NULL if \@decoder is NULL. + */ +extern pt_export struct pt_image * +pt_insn_get_image(struct pt_insn_decoder *decoder); + +/** Set the traced image. + * + * Sets the image that \@decoder uses for reading memory to \@image. If \@image + * is NULL, sets the image to \@decoder's default image. + * + * Only one image can be active at any time. + * + * Returns zero on success, a negative error code otherwise. + * Return -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_insn_set_image(struct pt_insn_decoder *decoder, + struct pt_image *image); + +/* Return a pointer to \@decoder's configuration. + * + * Returns a non-null pointer on success, NULL if \@decoder is NULL. + */ +extern pt_export const struct pt_config * +pt_insn_get_config(const struct pt_insn_decoder *decoder); + +/** Return the current time. + * + * On success, provides the time at \@decoder's current position in \@time. + * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the time for that branch or event. + * + * The time is similar to what a rdtsc instruction would return. Depending + * on the configuration, the time may not be fully accurate. If TSC is not + * enabled, the time is relative to the last synchronization and can't be used + * to correlate with other TSC-based time sources. In this case, -pte_no_time + * is returned and the relative time is provided in \@time. + * + * Some timing-related packets may need to be dropped (mostly due to missing + * calibration or incomplete configuration). To get an idea about the quality + * of the estimated time, we record the number of dropped MTC and CYC packets. + * + * If \@lost_mtc is not NULL, set it to the number of lost MTC packets. + * If \@lost_cyc is not NULL, set it to the number of lost CYC packets. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@time is NULL. + * Returns -pte_no_time if there has not been a TSC packet. 
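Putting the instruction-flow decoder functions above together, a minimal setup sketch. As before, pt_config_init() and the begin/end buffer pointers are assumed from the configuration section of this header.

#include "intel-pt.h"

#include <stddef.h>

/* Allocate an instruction flow decoder, attach a caller-provided image and
 * synchronize onto the first PSB.
 */
static struct pt_insn_decoder *
setup_insn_decoder(uint8_t *trace, size_t size, struct pt_image *image)
{
        struct pt_insn_decoder *decoder;
        struct pt_config config;
        int status;

        pt_config_init(&config);
        config.begin = trace;
        config.end = trace + size;

        decoder = pt_insn_alloc_decoder(&config);
        if (!decoder)
                return NULL;

        /* Passing NULL would switch back to the decoder's default image. */
        status = pt_insn_set_image(decoder, image);
        if (status < 0)
                goto fail;

        status = pt_insn_sync_forward(decoder);
        if (status < 0)
                goto fail;

        return decoder;

fail:
        pt_insn_free_decoder(decoder);
        return NULL;
}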
+ */ +extern pt_export int pt_insn_time(struct pt_insn_decoder *decoder, + uint64_t *time, uint32_t *lost_mtc, + uint32_t *lost_cyc); + +/** Return the current core bus ratio. + * + * On success, provides the core:bus ratio at \@decoder's current position + * in \@cbr. + * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the core:bus ratio for that branch or event. + * + * The ratio is defined as core cycles per bus clock cycle. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@cbr is NULL. + * Returns -pte_no_cbr if there has not been a CBR packet. + */ +extern pt_export int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, + uint32_t *cbr); + +/** Determine the next instruction. + * + * On success, provides the next instruction in execution order in \@insn. + * + * The \@size argument must be set to sizeof(struct pt_insn). + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns pts_eos to indicate the end of the trace stream. Subsequent calls + * to pt_insn_next() will continue to return pts_eos until trace is required + * to determine the next instruction. + * + * Returns -pte_bad_context if the decoder encountered an unexpected packet. + * Returns -pte_bad_opc if the decoder encountered unknown packets. + * Returns -pte_bad_packet if the decoder encountered unknown packet payloads. + * Returns -pte_bad_query if the decoder got out of sync. + * Returns -pte_eos if decoding reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@insn is NULL. + * Returns -pte_nomap if the memory at the instruction address can't be read. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_insn_next(struct pt_insn_decoder *decoder, + struct pt_insn *insn, size_t size); + + + +/* Block decoder. */ + + + +/** A block of instructions. + * + * Instructions in this block are executed sequentially but are not necessarily + * contiguous in memory. Users are expected to follow direct branches. + */ +struct pt_block { + /** The IP of the first instruction in this block. */ + uint64_t ip; + + /** The IP of the last instruction in this block. + * + * This can be used for error-detection. + */ + uint64_t end_ip; + + /** The image section that contains the instructions in this block. + * + * A value of zero means that the section did not have an identifier. + * The section was not added via an image section cache or the memory + * was read via the read memory callback. + */ + int isid; + + /** The execution mode for all instructions in this block. */ + enum pt_exec_mode mode; + + /** The instruction class for the last instruction in this block. + * + * This field may be set to ptic_error to indicate that the instruction + * class is not available. The block decoder may choose to not provide + * the instruction class in some cases for performance reasons. + */ + enum pt_insn_class iclass; + + /** The number of instructions in this block. */ + uint16_t ninsn; + + /** The raw bytes of the last instruction in this block in case the + * instruction does not fit entirely into this block's section. + * + * This field is only valid if \@truncated is set. + */ + uint8_t raw[pt_max_insn_size]; + + /** The size of the last instruction in this block in bytes. + * + * This field is only valid if \@truncated is set. 
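A sketch of the pt_insn_next() decode loop declared above. The pts_eos status bit is assumed from the status flags earlier in this header; the actual per-instruction work is left as a comment.

#include "intel-pt.h"

/* Decode instructions until the end of the trace, skipping to the next
 * synchronization point after decode errors.
 */
static int decode_instructions(struct pt_insn_decoder *decoder)
{
        for (;;) {
                struct pt_insn insn;
                int status;

                status = pt_insn_next(decoder, &insn, sizeof(insn));
                if (status < 0) {
                        /* Try to recover at the next PSB; -pte_eos here
                         * means we simply ran out of trace.
                         */
                        status = pt_insn_sync_forward(decoder);
                        if (status < 0)
                                return (status == -pte_eos) ? 0 : status;

                        continue;
                }

                /* ... process insn.ip, insn.iclass, insn.raw ... */

                if (status & pts_eos)
                        return 0;
        }
}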
+ */ + uint8_t size; + + /** A collection of flags giving additional information about the + * instructions in this block. + * + * - all instructions in this block were executed speculatively. + */ + uint32_t speculative:1; + + /** - speculative execution was aborted after this block. */ + uint32_t aborted:1; + + /** - speculative execution was committed after this block. */ + uint32_t committed:1; + + /** - tracing was disabled after this block. */ + uint32_t disabled:1; + + /** - tracing was enabled at this block. */ + uint32_t enabled:1; + + /** - tracing was resumed at this block. + * + * In addition to tracing being enabled, it continues from the IP + * at which tracing had been disabled before. + * + * If tracing was disabled at a call instruction, we assume that + * tracing will be re-enabled after returning from the call at the + * instruction following the call instruction. + */ + uint32_t resumed:1; + + /** - normal execution flow was interrupted after this block. */ + uint32_t interrupted:1; + + /** - tracing resumed at this block after an overflow. */ + uint32_t resynced:1; + + /** - tracing was stopped after this block. */ + uint32_t stopped:1; + + /** - the last instruction in this block is truncated. + * + * It starts in this block's section but continues in one or more + * other sections depending on how fragmented the memory image is. + * + * The raw bytes for the last instruction are provided in \@raw and + * its size in \@size in this case. + */ + uint32_t truncated:1; +}; + +/** Allocate an Intel PT block decoder. + * + * The decoder will work on the buffer defined in \@config, it shall contain + * raw trace data and remain valid for the lifetime of the decoder. + * + * The decoder needs to be synchronized before it can be used. + */ +extern pt_export struct pt_block_decoder * +pt_blk_alloc_decoder(const struct pt_config *config); + +/** Free an Intel PT block decoder. + * + * This will destroy the decoder's default image. + * + * The \@decoder must not be used after a successful return. + */ +extern pt_export void pt_blk_free_decoder(struct pt_block_decoder *decoder); + +/** Synchronize an Intel PT block decoder. + * + * Search for the next synchronization point in forward or backward direction. + * + * If \@decoder has not been synchronized, yet, the search is started at the + * beginning of the trace buffer in case of forward synchronization and at the + * end of the trace buffer in case of backward synchronization. + * + * Returns zero or a positive value on success, a negative error code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if no further synchronization point is found. + * Returns -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_blk_sync_forward(struct pt_block_decoder *decoder); +extern pt_export int pt_blk_sync_backward(struct pt_block_decoder *decoder); + +/** Manually synchronize an Intel PT block decoder. + * + * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB + * packet at \@offset. + * + * Returns zero or a positive value on success, a negative error code otherwise. + * + * Returns -pte_bad_opc if an unknown packet is encountered. + * Returns -pte_bad_packet if an unknown packet payload is encountered. + * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer. + * Returns -pte_eos if \@decoder reaches the end of its trace buffer. 
+ * Returns -pte_invalid if \@decoder is NULL. + * Returns -pte_nosync if there is no syncpoint at \@offset. + */ +extern pt_export int pt_blk_sync_set(struct pt_block_decoder *decoder, + uint64_t offset); + +/** Get the current decoder position. + * + * Fills the current \@decoder position into \@offset. + * + * This is useful for reporting errors. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_blk_get_offset(struct pt_block_decoder *decoder, + uint64_t *offset); + +/** Get the position of the last synchronization point. + * + * Fills the last synchronization position into \@offset. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@offset is NULL. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_blk_get_sync_offset(struct pt_block_decoder *decoder, + uint64_t *offset); + +/** Get the traced image. + * + * The returned image may be modified as long as \@decoder is not running. + * + * Returns a pointer to the traced image \@decoder uses for reading memory. + * Returns NULL if \@decoder is NULL. + */ +extern pt_export struct pt_image * +pt_blk_get_image(struct pt_block_decoder *decoder); + +/** Set the traced image. + * + * Sets the image that \@decoder uses for reading memory to \@image. If \@image + * is NULL, sets the image to \@decoder's default image. + * + * Only one image can be active at any time. + * + * Returns zero on success, a negative error code otherwise. + * Return -pte_invalid if \@decoder is NULL. + */ +extern pt_export int pt_blk_set_image(struct pt_block_decoder *decoder, + struct pt_image *image); + +/* Return a pointer to \@decoder's configuration. + * + * Returns a non-null pointer on success, NULL if \@decoder is NULL. + */ +extern pt_export const struct pt_config * +pt_blk_get_config(const struct pt_block_decoder *decoder); + +/** Return the current time. + * + * On success, provides the time at \@decoder's current position in \@time. + * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the time for that branch or event. + * + * The time is similar to what a rdtsc instruction would return. Depending + * on the configuration, the time may not be fully accurate. If TSC is not + * enabled, the time is relative to the last synchronization and can't be used + * to correlate with other TSC-based time sources. In this case, -pte_no_time + * is returned and the relative time is provided in \@time. + * + * Some timing-related packets may need to be dropped (mostly due to missing + * calibration or incomplete configuration). To get an idea about the quality + * of the estimated time, we record the number of dropped MTC and CYC packets. + * + * If \@lost_mtc is not NULL, set it to the number of lost MTC packets. + * If \@lost_cyc is not NULL, set it to the number of lost CYC packets. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@time is NULL. + * Returns -pte_no_time if there has not been a TSC packet. + */ +extern pt_export int pt_blk_time(struct pt_block_decoder *decoder, + uint64_t *time, uint32_t *lost_mtc, + uint32_t *lost_cyc); + +/** Return the current core bus ratio. + * + * On success, provides the core:bus ratio at \@decoder's current position + * in \@cbr. 
+ * Since \@decoder is reading ahead until the next indirect branch or event, + * the value matches the core:bus ratio for that branch or event. + * + * The ratio is defined as core cycles per bus clock cycle. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_invalid if \@decoder or \@cbr is NULL. + * Returns -pte_no_cbr if there has not been a CBR packet. + */ +extern pt_export int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder, + uint32_t *cbr); + +/** Determine the next block of instructions. + * + * On success, provides the next block of instructions in execution order in + * \@block. + * + * The \@size argument must be set to sizeof(struct pt_block). + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * + * Returns pts_eos to indicate the end of the trace stream. Subsequent calls + * to pt_block_next() will continue to return pts_eos until trace is required + * to determine the next instruction. + * + * Returns -pte_bad_context if the decoder encountered an unexpected packet. + * Returns -pte_bad_opc if the decoder encountered unknown packets. + * Returns -pte_bad_packet if the decoder encountered unknown packet payloads. + * Returns -pte_bad_query if the decoder got out of sync. + * Returns -pte_eos if decoding reached the end of the Intel PT buffer. + * Returns -pte_invalid if \@decoder or \@block is NULL. + * Returns -pte_nomap if the memory at the instruction address can't be read. + * Returns -pte_nosync if \@decoder is out of sync. + */ +extern pt_export int pt_blk_next(struct pt_block_decoder *decoder, + struct pt_block *block, size_t size); + +#ifdef __cplusplus +} +#endif + +#endif /* INTEL_PT_H */ diff --git a/libipt/internal/include/posix/pt_section_posix.h b/libipt/internal/include/posix/pt_section_posix.h new file mode 100644 index 0000000..a731692 --- /dev/null +++ b/libipt/internal/include/posix/pt_section_posix.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
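To round off the block-decoder interface declared in intel-pt.h above, a sketch of a simple instruction-counting loop. pt_config_init(), the begin/end buffer pointers, the pts_eos status bit, and the pte_nomem error code are assumed from earlier parts of that header.

#include "intel-pt.h"

#include <stddef.h>

/* Count instructions by iterating over blocks.  Each pt_blk_sync_forward()
 * starts a new decode run at the next synchronization point.
 */
static int64_t count_instructions(uint8_t *trace, size_t size,
                                  struct pt_image *image)
{
        struct pt_block_decoder *decoder;
        struct pt_config config;
        int64_t ninsn;
        int status;

        pt_config_init(&config);
        config.begin = trace;
        config.end = trace + size;

        decoder = pt_blk_alloc_decoder(&config);
        if (!decoder)
                return -pte_nomem;

        (void) pt_blk_set_image(decoder, image);

        ninsn = 0;
        while (pt_blk_sync_forward(decoder) >= 0) {
                for (;;) {
                        struct pt_block block;

                        status = pt_blk_next(decoder, &block, sizeof(block));
                        if (status < 0)
                                break;

                        ninsn += block.ninsn;

                        if (status & pts_eos)
                                break;
                }
        }

        pt_blk_free_decoder(decoder);
        return ninsn;
}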
+ */ + +#ifndef PT_SECTION_POSIX_H +#define PT_SECTION_POSIX_H + +#include +#include + +struct pt_section; + + +/* Fstat-based file status. */ +struct pt_sec_posix_status { + /* The file status. */ + struct stat stat; +}; + +/* MMAP-based section mapping information. */ +struct pt_sec_posix_mapping { + /* The mmap base address. */ + uint8_t *base; + + /* The mapped memory size. */ + uint64_t size; + + /* The begin and end of the mapped memory. */ + const uint8_t *begin, *end; +}; + + +/* Map a section. + * + * On success, sets @section's mapping, unmap, and read pointers. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section or @file are NULL. + * Returns -pte_invalid if @section can't be mapped. + */ +extern int pt_sec_posix_map(struct pt_section *section, int fd); + +/* Unmap a section. + * + * On success, clears @section's mapping, unmap, and read pointers. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_internal if @section has not been mapped. + */ +extern int pt_sec_posix_unmap(struct pt_section *section); + +/* Read memory from an mmaped section. + * + * Reads at most @size bytes from @section at @offset into @buffer. + * + * Returns the number of bytes read on success, a negative error code otherwise. + * Returns -pte_invalid if @section or @buffer are NULL. + * Returns -pte_nomap if @offset is beyond the end of the section. + */ +extern int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset); + +#endif /* PT_SECTION_POSIX_H */ diff --git a/libipt/internal/include/pt_asid.h b/libipt/internal/include/pt_asid.h new file mode 100644 index 0000000..f5a15a8 --- /dev/null +++ b/libipt/internal/include/pt_asid.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_ASID_H +#define PT_ASID_H + +struct pt_asid; + + +/* Read an asid provided by our user. 
+ * + * Translate a user-provided asid in @user into @asid. This uses default values + * for fields that are not provided by the user and for all fields, if @user is + * NULL. + * + * Fields set in @user that are not known (i.e. from a newer version of this + * library) will be ignored. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal, if @asid is NULL. + */ +extern int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user); + +/* Match two asids. + * + * Asids match if all fields provide either default values or are identical. + * + * Returns a positive number if @lhs matches @rhs. + * Returns zero if @lhs does not match @rhs. + * Returns a negative error code otherwise. + * + * Returns -pte_internal if @lhs or @rhs are NULL. + */ +extern int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs); + +#endif /* PT_ASID_H */ diff --git a/libipt/internal/include/pt_block_cache.h b/libipt/internal/include/pt_block_cache.h new file mode 100644 index 0000000..0e87342 --- /dev/null +++ b/libipt/internal/include/pt_block_cache.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_BLOCK_CACHE_H +#define PT_BLOCK_CACHE_H + +#include "intel-pt.h" + +#include + + +/* A block cache entry qualifier. + * + * This describes what to do at the decision point determined by a block cache + * entry. + */ +enum pt_bcache_qualifier { + /* This is not a decision point. + * + * The next decision point is too far away and one or more fields + * threatened to overflow so we had to stop somewhere on our way. + * + * Apply the displacement and number of instructions and continue from + * the resulting IP. + */ + ptbq_again, + + /* The decision point is a conditional branch. + * + * This requires a conditional branch query. + * + * The isize field should provide the size of the branch instruction so + * only taken branches require the instruction to be decoded. + */ + ptbq_cond, + + /* The decision point is a near indirect call. 
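A sketch of how these internal helpers are meant to be used when filtering sections by address space; pt_asid_init() comes from intel-pt.h. This is illustrative only, not a documented entry point.

#include "intel-pt.h"
#include "pt_asid.h"

/* Check whether a section's asid belongs to the process identified by @cr3.
 * A default-initialized field acts as a wildcard in pt_asid_match().
 */
static int section_belongs_to(const struct pt_asid *section_asid, uint64_t cr3)
{
        struct pt_asid request;

        pt_asid_init(&request);
        request.cr3 = cr3;

        /* Positive: match, zero: no match, negative: error. */
        return pt_asid_match(section_asid, &request);
}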
+ * + * This requires a return-address stack update and an indirect branch + * query. + * + * The isize field should provide the size of the call instruction so + * the return address can be computed by adding it to the displacement + * that brings us to the call instruction. + * + * No instruction decode is required. + */ + ptbq_ind_call, + + /* The decision point is a near return. + * + * The return may be compressed so this requires a conditional branch + * query to determine the compression state and either a return-address + * stack lookup or an indirect branch query. + * + * No instruction decode is required. + */ + ptbq_return, + + /* The decision point is an indirect jump or far branch. + * + * This requires an indirect branch query. + * + * No instruction decode is required. + */ + ptbq_indirect, + + /* The decision point requires the instruction at the decision point IP + * to be decoded to determine the next step. + * + * This is used for + * + * - near direct calls that need to maintain the return-address stack. + * + * - near direct jumps that are too far away to be handled with a + * block cache entry as they would overflow the displacement field. + */ + ptbq_decode +}; + +/* A block cache entry. + * + * There will be one such entry per byte of decoded memory image. Each entry + * corresponds to an IP in the traced memory image. The cache is initialized + * with invalid entries for all IPs. + * + * Only entries for the first byte of each instruction will be used; other + * entries are ignored and will remain invalid. + * + * Each valid entry gives the distance from the entry's IP to the next decision + * point both in bytes and in the number of instructions. + */ +struct pt_bcache_entry { + /* The displacement to the next decision point in bytes. + * + * This is zero if we are at a decision point except for ptbq_again + * where it gives the displacement to the next block cache entry to be + * used. + */ + int32_t displacement:16; + + /* The number of instructions to the next decision point. + * + * This is typically one at a decision point since we are already + * accounting for the instruction at the decision point. + * + * Note that this field must be smaller than the respective struct + * pt_block field so we can fit one block cache entry into an empty + * block. + */ + uint32_t ninsn:8; + + /* The execution mode for all instruction between here and the next + * decision point. + * + * This is enum pt_exec_mode. + * + * This is ptem_unknown if the entry is not valid. + */ + uint32_t mode:2; + + /* The decision point qualifier. + * + * This is enum pt_bcache_qualifier. + */ + uint32_t qualifier:3; + + /* The size of the instruction at the decision point. + * + * This is zero if the size is too big to fit into the field. In this + * case, the instruction needs to be decoded to determine its size. + */ + uint32_t isize:3; +}; + +/* Get the execution mode of a block cache entry. */ +static inline enum pt_exec_mode pt_bce_exec_mode(struct pt_bcache_entry bce) +{ + return (enum pt_exec_mode) bce.mode; +} + +/* Get the block cache qualifier of a block cache entry. */ +static inline enum pt_bcache_qualifier +pt_bce_qualifier(struct pt_bcache_entry bce) +{ + return (enum pt_bcache_qualifier) bce.qualifier; +} + +/* Check if a block cache entry is valid. */ +static inline int pt_bce_is_valid(struct pt_bcache_entry bce) +{ + return pt_bce_exec_mode(bce) != ptem_unknown; +} + + + +/* A block cache. */ +struct pt_block_cache { + /* The number of cache entries. 
*/ + uint32_t nentries; + + /* A variable-length array of @nentries entries. */ + struct pt_bcache_entry entry[]; +}; + +/* Create a block cache. + * + * @nentries is the number of entries in the cache and should match the size of + * the to-be-cached section in bytes. + */ +extern struct pt_block_cache *pt_bcache_alloc(uint64_t nentries); + +/* Destroy a block cache. */ +extern void pt_bcache_free(struct pt_block_cache *bcache); + +/* Cache a block. + * + * It is expected that all calls for the same @index write the same @bce. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @bcache is NULL. + * Returns -pte_internal if @index is outside of @bcache. + */ +extern int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index, + struct pt_bcache_entry bce); + +/* Lookup a cached block. + * + * The returned cache entry need not be valid. The caller is expected to check + * for validity using pt_bce_is_valid(*@bce). + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @bcache or @bce is NULL. + * Returns -pte_internal if @index is outside of @bcache. + */ +extern int pt_bcache_lookup(struct pt_bcache_entry *bce, + const struct pt_block_cache *bcache, + uint64_t index); + +#endif /* PT_BLOCK_CACHE_H */ diff --git a/libipt/internal/include/pt_block_decoder.h b/libipt/internal/include/pt_block_decoder.h new file mode 100644 index 0000000..3dbb240 --- /dev/null +++ b/libipt/internal/include/pt_block_decoder.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_BLOCK_DECODER_H +#define PT_BLOCK_DECODER_H + +#include "pt_query_decoder.h" +#include "pt_image.h" +#include "pt_retstack.h" + +struct pt_section; + + +/* A cached mapped section. + * + * This caches a single mapped section across pt_blk_next() calls to avoid + * repeated get/map and unmap/put of the current section. 
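A sketch of the intended fill/lookup pattern, indexed by the IP's offset into the cached section. The ptem_64bit execution mode and the pte_internal error code are taken from intel-pt.h; the concrete field values are illustrative.

#include "pt_block_cache.h"

#include <string.h>

/* Record a conditional-branch decision point at section offset @ioffset and
 * read it back.  Entries start out invalid until pt_bcache_add() fills them.
 */
static int cache_cond_branch(struct pt_block_cache *bcache, uint64_t ioffset,
                             uint8_t isize)
{
        struct pt_bcache_entry bce;
        int errcode;

        memset(&bce, 0, sizeof(bce));
        bce.ninsn = 1;                  /* just the branch itself */
        bce.displacement = 0;           /* we are at the decision point */
        bce.mode = ptem_64bit;
        bce.qualifier = ptbq_cond;
        bce.isize = isize;              /* 0 if it does not fit the field */

        errcode = pt_bcache_add(bcache, ioffset, bce);
        if (errcode < 0)
                return errcode;

        errcode = pt_bcache_lookup(&bce, bcache, ioffset);
        if (errcode < 0)
                return errcode;

        return pt_bce_is_valid(bce) ? 0 : -pte_internal;
}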
+ * + * Since we can't guarantee that the image doesn't change between pt_blk_next() + * calls, we still need to validate that the cached section is accurate. This + * can be done without additional get/put or map/unmap of the cached section, + * though, and is significantly cheaper. + */ +struct pt_cached_section { + /* The cached section. + * + * The cache is valid if and only if @section is non-NULL. + * + * It needs to be unmapped and put. Use pt_blk_scache_invalidate() to + * release the cached section and to invalidate the cache. + */ + struct pt_section *section; + + /* The virtual address at which @section was loaded. */ + uint64_t laddr; + + /* The section identifier. */ + int isid; +}; + +/* A block decoder. + * + * It decodes Intel(R) Processor Trace into a sequence of instruction blocks + * such that the instructions in each block can be decoded without further need + * of trace. + */ +struct pt_block_decoder { + /* The Intel(R) Processor Trace query decoder. */ + struct pt_query_decoder query; + + /* The configuration flags. + * + * Those are our flags set by the user. In @query.config.flags, we set + * the flags we need for the query decoder. + */ + struct pt_conf_flags flags; + + /* The default image. */ + struct pt_image default_image; + + /* The image. */ + struct pt_image *image; + + /* The current cached section. */ + struct pt_cached_section scache; + + /* The current address space. */ + struct pt_asid asid; + + /* The current Intel(R) Processor Trace event. */ + struct pt_event event; + + /* The call/return stack for ret compression. */ + struct pt_retstack retstack; + + /* The start IP of the next block. + * + * If tracing is disabled, this is the IP at which we assume tracing to + * be resumed. + */ + uint64_t ip; + + /* The current execution mode. */ + enum pt_exec_mode mode; + + /* The status of the last successful decoder query. + * + * Errors are reported directly; the status is always a non-negative + * pt_status_flag bit-vector. + */ + int status; + + /* A collection of flags defining how to proceed flow reconstruction: + * + * - tracing is enabled. + */ + uint32_t enabled:1; + + /* - process @event. */ + uint32_t process_event:1; + + /* - instructions are executed speculatively. */ + uint32_t speculative:1; +}; + + +/* Initialize a block decoder. + * + * Returns zero on success; a negative error code otherwise. + * Returns -pte_internal, if @decoder or @config is NULL. + */ +extern int pt_blk_decoder_init(struct pt_block_decoder *decoder, + const struct pt_config *config); + +/* Finalize a block decoder. */ +extern void pt_blk_decoder_fini(struct pt_block_decoder *decoder); + +#endif /* PT_BLOCK_DECODER_H */ diff --git a/libipt/internal/include/pt_config.h b/libipt/internal/include/pt_config.h new file mode 100644 index 0000000..18fccf0 --- /dev/null +++ b/libipt/internal/include/pt_config.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "intel-pt.h" + + +/* Read the configuration provided by a library user and zero-initialize + * missing fields. + * + * We keep the user's size value if it is smaller than sizeof(*@config) to + * allow decoders to detect missing configuration bits. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @config is NULL. + * Returns -pte_invalid if @uconfig is NULL. + * Returns -pte_bad_config if @config is too small to be useful. + */ +extern int pt_config_from_user(struct pt_config *config, + const struct pt_config *uconfig); diff --git a/libipt/internal/include/pt_cpu.h b/libipt/internal/include/pt_cpu.h new file mode 100644 index 0000000..7eea086 --- /dev/null +++ b/libipt/internal/include/pt_cpu.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_CPU_H +#define PT_CPU_H + +struct pt_cpu; + +/* Parses @s which should be of format family/model[/stepping] and + * stores the value in @cpu on success. 
+ * The optional stepping defaults to 0 if omitted. + * + * Returns 0 on success. + * Returns -pte_invalid if @cpu or @s is NULL. + * Returns -pte_invalid if @s could not be parsed. + */ +extern int pt_cpu_parse(struct pt_cpu *cpu, const char *s); + +/* Get the cpu we're running on. + * + * Reads the family/model/stepping of the processor on which this function + * is executed and stores the value in @cpu. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_invalid if @cpu is NULL. + */ +extern int pt_cpu_read(struct pt_cpu *cpu); + +#endif /* PT_CPU_H */ diff --git a/libipt/internal/include/pt_cpuid.h b/libipt/internal/include/pt_cpuid.h new file mode 100644 index 0000000..83de5c2 --- /dev/null +++ b/libipt/internal/include/pt_cpuid.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_CPUID_H +#define PT_CPUID_H + +#include + +/* Execute cpuid with @leaf set in the eax register. + * The result is stored in @eax, @ebx, @ecx and @edx. + */ +extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx); + +#endif /* PT_CPUID_H */ diff --git a/libipt/internal/include/pt_decoder_function.h b/libipt/internal/include/pt_decoder_function.h new file mode 100644 index 0000000..70c7624 --- /dev/null +++ b/libipt/internal/include/pt_decoder_function.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
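A small sketch tying pt_cpu_parse() and pt_cpu_read() to the decoder configuration; the cpu field of struct pt_config is assumed from intel-pt.h, and "6/62" is just an example of the family/model[/stepping] format.

#include "pt_cpu.h"

#include "intel-pt.h"

/* Fill @config->cpu either from a "family/model[/stepping]" string such as
 * "6/62" or, if @arg is NULL, from the processor we are running on.
 */
static int set_config_cpu(struct pt_config *config, const char *arg)
{
        if (arg)
                return pt_cpu_parse(&config->cpu, arg);

        return pt_cpu_read(&config->cpu);
}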
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_DECODER_FUNCTION_H
+#define PT_DECODER_FUNCTION_H
+
+#include <stdint.h>
+
+struct pt_query_decoder;
+struct pt_packet_decoder;
+struct pt_packet;
+struct pt_config;
+
+
+/* Intel(R) Processor Trace decoder function flags. */
+enum pt_decoder_function_flag {
+        /* The decoded packet contains an unconditional branch destination. */
+        pdff_tip = 1 << 0,
+
+        /* The decoded packet contains taken/not-taken information for
+         * conditional branches.
+         */
+        pdff_tnt = 1 << 1,
+
+        /* The decoded packet contains an event. */
+        pdff_event = 1 << 2,
+
+        /* The decoded packet marks the end of a PSB header. */
+        pdff_psbend = 1 << 3,
+
+        /* The decoded packet contains a non-branch IP update. */
+        pdff_fup = 1 << 4,
+
+        /* The decoded packet is unknown to the decoder. */
+        pdff_unknown = 1 << 5,
+
+        /* The decoded packet contains timing information. */
+        pdff_timing = 1 << 6,
+
+        /* The decoded packet contains padding. */
+        pdff_pad = 1 << 7
+};
+
+/* An Intel(R) Processor Trace decoder function. */
+struct pt_decoder_function {
+        /* The function to analyze the next packet. */
+        int (*packet)(struct pt_packet_decoder *, struct pt_packet *);
+
+        /* The function to decode the next packet. */
+        int (*decode)(struct pt_query_decoder *);
+
+        /* The function to decode the next packet in segment header
+         * context, i.e. between PSB and PSBEND.
+         */
+        int (*header)(struct pt_query_decoder *);
+
+        /* Decoder function flags. */
+        int flags;
+};
+
+
+/* Fetch the decoder function.
+ *
+ * Sets @dfun to the decoder function for decoding the packet at @pos.
+ *
+ * Returns 0 on success.
+ * Returns -pte_internal if @dfun or @config is NULL.
+ * Returns -pte_nosync if @pos is NULL or outside @config's trace buffer.
+ * Returns -pte_eos if the opcode is incomplete or missing.
+ */
+extern int pt_df_fetch(const struct pt_decoder_function **dfun,
+                       const uint8_t *pos, const struct pt_config *config);
+
+
+/* Decoder functions for the various packet types.
+ *
+ * Do not call those functions directly!
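+ *
+ * Instead, fetch the function for the packet at the current position and
+ * dispatch through its function pointers.  A minimal sketch (assuming a
+ * query decoder @decoder with valid @pos and @config fields):
+ *
+ *   const struct pt_decoder_function *dfun;
+ *   int errcode;
+ *
+ *   errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ *   if (errcode < 0)
+ *           return errcode;
+ *
+ *   errcode = dfun->decode(decoder);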
+ */ +extern const struct pt_decoder_function pt_decode_unknown; +extern const struct pt_decoder_function pt_decode_pad; +extern const struct pt_decoder_function pt_decode_psb; +extern const struct pt_decoder_function pt_decode_tip; +extern const struct pt_decoder_function pt_decode_tnt_8; +extern const struct pt_decoder_function pt_decode_tnt_64; +extern const struct pt_decoder_function pt_decode_tip_pge; +extern const struct pt_decoder_function pt_decode_tip_pgd; +extern const struct pt_decoder_function pt_decode_fup; +extern const struct pt_decoder_function pt_decode_pip; +extern const struct pt_decoder_function pt_decode_ovf; +extern const struct pt_decoder_function pt_decode_mode; +extern const struct pt_decoder_function pt_decode_psbend; +extern const struct pt_decoder_function pt_decode_tsc; +extern const struct pt_decoder_function pt_decode_cbr; +extern const struct pt_decoder_function pt_decode_tma; +extern const struct pt_decoder_function pt_decode_mtc; +extern const struct pt_decoder_function pt_decode_cyc; +extern const struct pt_decoder_function pt_decode_stop; +extern const struct pt_decoder_function pt_decode_vmcs; +extern const struct pt_decoder_function pt_decode_mnt; + +#endif /* PT_DECODER_FUNCTION_H */ diff --git a/libipt/internal/include/pt_encoder.h b/libipt/internal/include/pt_encoder.h new file mode 100644 index 0000000..2831a41 --- /dev/null +++ b/libipt/internal/include/pt_encoder.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_ENCODER_H +#define PT_ENCODER_H + +#include "intel-pt.h" + + +/* An Intel PT packet encoder. */ +struct pt_encoder { + /* The encoder configuration. */ + struct pt_config config; + + /** The current position in the trace buffer. */ + uint8_t *pos; +}; + + +/* Initialize the packet encoder. + * + * Returns zero on success, a negative error code otherwise. + */ +extern int pt_encoder_init(struct pt_encoder *, const struct pt_config *); + +/* Finalize the packet encoder. 
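+ *
+ * A typical encode session pairs pt_encoder_init() and pt_encoder_fini()
+ * around a sequence of pt_encode_* calls.  A minimal sketch (assuming a
+ * readily configured @config with a valid trace buffer):
+ *
+ *   struct pt_encoder encoder;
+ *   int errcode;
+ *
+ *   errcode = pt_encoder_init(&encoder, &config);
+ *   if (errcode < 0)
+ *           return errcode;
+ *
+ *   errcode = pt_encode_psb(&encoder);
+ *   if (errcode >= 0)
+ *           errcode = pt_encode_psbend(&encoder);
+ *
+ *   pt_encoder_fini(&encoder);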
*/ +extern void pt_encoder_fini(struct pt_encoder *); + + +/* The below functions are convenience wrappers around pt_enc_next(). */ + +/* Encode a Padding (pad) packet. */ +extern int pt_encode_pad(struct pt_encoder *); + +/* Encode a Packet Stream Boundary (psb) packet. */ +extern int pt_encode_psb(struct pt_encoder *); + +/* Encode an End PSB (psbend) packet. */ +extern int pt_encode_psbend(struct pt_encoder *); + +/* Encode a Target Instruction Pointer (tip) packet. */ +extern int pt_encode_tip(struct pt_encoder *, uint64_t ip, + enum pt_ip_compression ipc); + +/* Encode a Taken Not Taken (tnt) packet - 8-bit version. */ +extern int pt_encode_tnt_8(struct pt_encoder *, uint8_t tnt, int size); + +/* Encode a Taken Not Taken (tnt) packet - 64-bit version. */ +extern int pt_encode_tnt_64(struct pt_encoder *, uint64_t tnt, int size); + +/* Encode a Packet Generation Enable (tip.pge) packet. */ +extern int pt_encode_tip_pge(struct pt_encoder *, uint64_t ip, + enum pt_ip_compression ipc); + +/* Encode a Packet Generation Disable (tip.pgd) packet. */ +extern int pt_encode_tip_pgd(struct pt_encoder *, uint64_t ip, + enum pt_ip_compression ipc); + +/* Encode a Flow Update Packet (fup). */ +extern int pt_encode_fup(struct pt_encoder *, uint64_t ip, + enum pt_ip_compression ipc); + +/* Encode a Paging Information Packet (pip). */ +extern int pt_encode_pip(struct pt_encoder *, uint64_t cr3, uint8_t flags); + +/* Encode a Overflow Packet (ovf). */ +extern int pt_encode_ovf(struct pt_encoder *); + +/* Encode a Mode Exec Packet (mode.exec). */ +extern int pt_encode_mode_exec(struct pt_encoder *, enum pt_exec_mode); + +/* Encode a Mode Tsx Packet (mode.tsx). */ +extern int pt_encode_mode_tsx(struct pt_encoder *, uint8_t); + +/* Encode a Time Stamp Counter (tsc) packet. */ +extern int pt_encode_tsc(struct pt_encoder *, uint64_t); + +/* Encode a Core Bus Ratio (cbr) packet. */ +extern int pt_encode_cbr(struct pt_encoder *, uint8_t); + +/* Encode a TSC/MTC Alignment (tma) packet. */ +extern int pt_encode_tma(struct pt_encoder *, uint16_t ctc, + uint16_t fc); + +/* Encode a Mini Time Counter (mtc) packet. */ +extern int pt_encode_mtc(struct pt_encoder *, uint8_t ctc); + +/* Encode a Cycle Count (cyc) packet. */ +extern int pt_encode_cyc(struct pt_encoder *, uint32_t cyc); + +/* Encode a TraceStop Packet (stop). */ +extern int pt_encode_stop(struct pt_encoder *); + +/* Encode a VMCS packet. */ +extern int pt_encode_vmcs(struct pt_encoder *, uint64_t); + +/* Encode a Maintenance (mnt) packet. */ +extern int pt_encode_mnt(struct pt_encoder *, uint64_t); + +#endif /* PT_ENCODER_H */ diff --git a/libipt/internal/include/pt_event_queue.h b/libipt/internal/include/pt_event_queue.h new file mode 100644 index 0000000..dd56f55 --- /dev/null +++ b/libipt/internal/include/pt_event_queue.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_EVENT_QUEUE_H +#define PT_EVENT_QUEUE_H + +#include "intel-pt.h" + +#include + + +/* Events are grouped by the packet the event binds to. */ +enum pt_event_binding { + evb_psbend, + evb_tip, + evb_fup, + + evb_max +}; + +enum { + /* The maximal number of pending events - should be a power of two. */ + evq_max = 8 +}; + +/* A queue of events. */ +struct pt_event_queue { + /* A collection of event queues, one per binding. */ + struct pt_event queue[evb_max][evq_max]; + + /* The begin and end indices for the above event queues. */ + uint8_t begin[evb_max]; + uint8_t end[evb_max]; + + /* A standalone event to be published immediately. */ + struct pt_event standalone; +}; + + +/* Initialize (or reset) an event queue. */ +extern void pt_evq_init(struct pt_event_queue *); + +/* Get a standalone event. + * + * Returns a pointer to the standalone event on success. + * Returns NULL if @evq is NULL. + */ +extern struct pt_event *pt_evq_standalone(struct pt_event_queue *evq); + +/* Enqueue an event. + * + * Adds a new event to @evq for binding @evb. + * + * Returns a pointer to the new event on success. + * Returns NULL if @evq is NULL or @evb is invalid. + * Returns NULL if @evq is full. + */ +extern struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq, + enum pt_event_binding evb); + + +/* Dequeue an event. + * + * Removes the first event for binding @evb from @evq. + * + * Returns a pointer to the dequeued event on success. + * Returns NULL if @evq is NULL or @evb is invalid. + * Returns NULL if @evq is empty. + */ +extern struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq, + enum pt_event_binding evb); + +/* Clear a queue and discard events. + * + * Removes all events for binding @evb from @evq. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @evq is NULL or @evb is invalid. + */ +extern int pt_evq_clear(struct pt_event_queue *evq, + enum pt_event_binding evb); + +/* Check for emptiness. + * + * Check if @evq for binding @evb is empty. + * + * Returns a positive number if @evq is empty. + * Returns zero if @evq is not empty. + * Returns -pte_internal if @evq is NULL or @evb is invalid. + */ +extern int pt_evq_empty(const struct pt_event_queue *evq, + enum pt_event_binding evb); + +/* Check for non-emptiness. + * + * Check if @evq for binding @evb contains pending events. + * + * Returns a positive number if @evq is not empty. + * Returns zero if @evq is empty. + * Returns -pte_internal if @evq is NULL or @evb is invalid. 
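+ *
+ * Together with pt_evq_dequeue(), this allows draining all pending events
+ * for one binding.  An illustrative sketch, where process_event() stands in
+ * for a hypothetical consumer:
+ *
+ *   while (pt_evq_pending(evq, evb) > 0) {
+ *           struct pt_event *ev;
+ *
+ *           ev = pt_evq_dequeue(evq, evb);
+ *           if (!ev)
+ *                   break;
+ *
+ *           process_event(ev);
+ *   }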
+ */ +extern int pt_evq_pending(const struct pt_event_queue *evq, + enum pt_event_binding evb); + +/* Find an event by type. + * + * Searches @evq for binding @evb for an event of type @evt. + * + * Returns a pointer to the first matching event on success. + * Returns NULL if there is no such event. + * Returns NULL if @evq is NULL. + * Returns NULL if @evb or @evt is invalid. + */ +extern struct pt_event *pt_evq_find(struct pt_event_queue *evq, + enum pt_event_binding evb, + enum pt_event_type evt); + +#endif /* PT_EVENT_QUEUE_H */ diff --git a/libipt/internal/include/pt_ild.h b/libipt/internal/include/pt_ild.h new file mode 100644 index 0000000..0bf0b04 --- /dev/null +++ b/libipt/internal/include/pt_ild.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if !defined(PT_ILD_H) +#define PT_ILD_H + +#include "pt_insn.h" + +#include "intel-pt.h" + + +typedef enum { + PTI_MAP_0, /* 1-byte opcodes. may have modrm */ + PTI_MAP_1, /* 2-byte opcodes (0x0f). may have modrm */ + PTI_MAP_2, /* 3-byte opcodes (0x0f38). has modrm */ + PTI_MAP_3, /* 3-byte opcodes (0x0f3a). has modrm */ + PTI_MAP_AMD3DNOW, /* 3d-now opcodes (0x0f0f). has modrm */ + PTI_MAP_INVALID +} pti_map_enum_t; + +struct pt_ild { + /* inputs */ + uint8_t const *itext; + uint8_t max_bytes; /*1..15 bytes */ + enum pt_exec_mode mode; + + union { + struct { + uint32_t osz:1; + uint32_t asz:1; + uint32_t lock:1; + uint32_t f3:1; + uint32_t f2:1; + uint32_t last_f2f3:2; /* 2 or 3 */ + /* The vex bit is set for c4/c5 VEX and EVEX. */ + uint32_t vex:1; + /* The REX.R and REX.W bits in REX, VEX, or EVEX. 
*/ + uint32_t rex_r:1; + uint32_t rex_w:1; + } s; + uint32_t i; + } u; + uint8_t imm1_bytes; /* # of bytes in 1st immediate */ + uint8_t imm2_bytes; /* # of bytes in 2nd immediate */ + uint8_t disp_bytes; /* # of displacement bytes */ + uint8_t modrm_byte; + /* 5b but valid values= 0,1,2,3 could be in bit union */ + uint8_t map; + uint8_t rex; /* 0b0100wrxb */ + uint8_t nominal_opcode; + uint8_t disp_pos; + /* imm_pos can be derived from disp_pos + disp_bytes. */ +}; + +static inline pti_map_enum_t pti_get_map(const struct pt_ild *ild) +{ + return (pti_map_enum_t) ild->map; +} + +static inline uint8_t pti_get_modrm_mod(const struct pt_ild *ild) +{ + return ild->modrm_byte >> 6; +} + +static inline uint8_t pti_get_modrm_reg(const struct pt_ild *ild) +{ + return (ild->modrm_byte >> 3) & 7; +} + +static inline uint8_t pti_get_modrm_rm(const struct pt_ild *ild) +{ + return ild->modrm_byte & 7; +} + +/* MAIN ENTRANCE POINTS */ + +/* one time call. not thread safe init. call when single threaded. */ +extern void pt_ild_init(void); + +/* all decoding is multithread safe. */ + +/* Decode one instruction. + * + * Input: + * + * @insn->ip: the virtual address of the instruction + * @insn->raw: the memory at that virtual address + * @insn->size: the maximal size of the instruction + * @insn->mode: the execution mode + * + * Output: + * + * @insn->size: the actual size of the instruction + * @insn->iclass: a coarse classification + * + * @iext->iclass: a finer grain classification + * @iext->variant: instruction class dependent information + * + * Returns zero on success, a negative error code otherwise. + */ +extern int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext); + +#endif /* PT_ILD_H */ diff --git a/libipt/internal/include/pt_image.h b/libipt/internal/include/pt_image.h new file mode 100644 index 0000000..b6364c6 --- /dev/null +++ b/libipt/internal/include/pt_image.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef PT_IMAGE_H
+#define PT_IMAGE_H
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* A list of sections. */
+struct pt_section_list {
+        /* The next list element. */
+        struct pt_section_list *next;
+
+        /* The mapped section. */
+        struct pt_mapped_section section;
+
+        /* The image section identifier. */
+        int isid;
+
+        /* A flag saying whether @section is already mapped. */
+        uint32_t mapped:1;
+};
+
+/* A traced image consisting of a collection of sections. */
+struct pt_image {
+        /* The optional image name. */
+        char *name;
+
+        /* The list of sections. */
+        struct pt_section_list *sections;
+
+        /* An optional read memory callback. */
+        struct {
+                /* The callback function. */
+                read_memory_callback_t *callback;
+
+                /* The callback context. */
+                void *context;
+        } readmem;
+
+        /* The cache size as number of to-keep-mapped sections. */
+        uint16_t cache;
+
+        /* The number of permanently mapped sections. */
+        uint16_t mapped;
+};
+
+/* Initialize an image with an optional @name. */
+extern void pt_image_init(struct pt_image *image, const char *name);
+
+/* Finalize an image.
+ *
+ * This removes all sections and frees the name.
+ */
+extern void pt_image_fini(struct pt_image *image);
+
+/* Add a section to an image.
+ *
+ * Add @section identified by @isid to @image at @vaddr in @asid. If @section
+ * overlaps with existing sections, the existing sections are shrunk, split, or
+ * removed to accommodate @section. Absence of a section identifier is indicated
+ * by an @isid of zero.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ */
+extern int pt_image_add(struct pt_image *image, struct pt_section *section,
+                        const struct pt_asid *asid, uint64_t vaddr, int isid);
+
+/* Remove a section from an image.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ * Returns -pte_bad_image if @image does not contain @section at @vaddr.
+ */
+extern int pt_image_remove(struct pt_image *image, struct pt_section *section,
+                           const struct pt_asid *asid, uint64_t vaddr);
+
+/* Read memory from an image.
+ *
+ * Reads at most @size bytes from @image at @addr in @asid into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @isid, @buffer, or @asid is NULL.
+ * Returns -pte_nomap if the section does not contain @addr.
+ */
+extern int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer,
+                         uint16_t size, const struct pt_asid *asid,
+                         uint64_t addr);
+
+/* Find an image section.
+ *
+ * Find the section containing @vaddr in @asid and provide a reference to it in
+ * @section and its load address in @laddr. The caller needs to put the
+ * reference to @section after use.
+ *
+ * Returns the section's identifier on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @section, @laddr, or @asid is NULL.
+ * Returns -pte_nomap if there is no such section in @image.
+ */
+extern int pt_image_find(struct pt_image *image, struct pt_section **section,
+                         uint64_t *laddr, const struct pt_asid *asid,
+                         uint64_t vaddr);
+
+/* Validate an image section.
+ *
+ * Validate that a lookup by @asid and @vaddr in @image would result in @section
+ * loaded at @laddr identified by @isid.
+ *
+ * Validation may fail sporadically, e.g. if @section has been evicted from
+ * @image's LRU cache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if @image or @asid is NULL.
+ * Returns -pte_nomap if validation failed.
+ */
+extern int pt_image_validate(const struct pt_image *image,
+                             const struct pt_asid *asid, uint64_t vaddr,
+                             const struct pt_section *section, uint64_t laddr,
+                             int isid);
+
+#endif /* PT_IMAGE_H */
diff --git a/libipt/internal/include/pt_image_section_cache.h b/libipt/internal/include/pt_image_section_cache.h
new file mode 100644
index 0000000..4e65ecb
--- /dev/null
+++ b/libipt/internal/include/pt_image_section_cache.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_IMAGE_SECTION_CACHE_H
+#define PT_IMAGE_SECTION_CACHE_H
+
+#include <stdint.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+struct pt_section;
+
+
+/* An image section cache entry. */
+struct pt_iscache_entry {
+        /* The section object.
+         *
+         * We hold a reference to the section - put it when the section is
+         * removed from the cache.
+         */
+        struct pt_section *section;
+
+        /* The base address at which @section has been loaded. */
+        uint64_t laddr;
+};
+
+/* A cache of image sections and their load addresses.
+ *
+ * We combine the section with its load address to reduce the amount of
+ * information we need to store in order to read from a cached section by
+ * virtual address.
+ *
+ * Internally, the section object will be shared if it is loaded at different
+ * addresses in the cache.
+ *
+ * The cache does not consider the address-space the section is mapped into.
+ * This is not relevant for reading from the section.
+ */
+struct pt_image_section_cache {
+        /* The optional name of the cache; NULL if not named. */
+        char *name;
+
+        /* An array of @nentries cached sections. */
+        struct pt_iscache_entry *entries;
+
+#if defined(FEATURE_THREADS)
+        /* A lock protecting this image section cache. */
+        mtx_t lock;
+#endif /* defined(FEATURE_THREADS) */
+
+        /* The capacity of the @entries array.
+ * + * Cached sections are identified by a positive integer, the image + * section identifier (isid), which is derived from their index into the + * @entries array. + * + * We can't expand the section cache capacity beyond INT_MAX. + */ + uint16_t capacity; + + /* The current size of the cache in number of entries. + * + * This is smaller than @capacity if there is still room in the @entries + * array; equal to @capacity if the @entries array is full and needs to + * be reallocated. + */ + uint16_t size; +}; + + +/* Initialize an image section cache. */ +extern int pt_iscache_init(struct pt_image_section_cache *iscache, + const char *name); + +/* Finalize an image section cache. */ +extern void pt_iscache_fini(struct pt_image_section_cache *iscache); + +/* Add a section to the cache. + * + * Adds @section at @laddr to @iscache and returns its isid. If a similar + * section is already cached, returns that section's isid, instead. + * + * We take a full section rather than its filename and range in that file to + * avoid the dependency to pt_section.h. Callers are expected to query the + * cache before creating the section, so we should only see unnecessary section + * creation/destruction on insertion races. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @iscache or @section is NULL. + * Returns -pte_internal if @section's filename is NULL. + */ +extern int pt_iscache_add(struct pt_image_section_cache *iscache, + struct pt_section *section, uint64_t laddr); + +/* Find a section in the cache. + * + * Returns a positive isid if a section matching @filename, @offset, @size + * loaded at @laddr is found in @iscache. + * Returns zero if no such section is found. + * Returns a negative error code otherwise. + * Returns -pte_internal if @iscache or @filename is NULL. + */ +extern int pt_iscache_find(struct pt_image_section_cache *iscache, + const char *filename, uint64_t offset, + uint64_t size, uint64_t laddr); + +/* Lookup the section identified by its isid. + * + * Provides a reference to the section in @section and its load address in + * @laddr on success. The caller is expected to put the returned section after + * use. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @iscache, @section, or @laddr is NULL. + * Returns -pte_bad_image if @iscache does not contain @isid. + */ +extern int pt_iscache_lookup(struct pt_image_section_cache *iscache, + struct pt_section **section, uint64_t *laddr, + int isid); + +/* Clear an image section cache. */ +extern int pt_iscache_clear(struct pt_image_section_cache *iscache); + +#endif /* PT_IMAGE_SECTION_CACHE_H */ diff --git a/libipt/internal/include/pt_insn.h b/libipt/internal/include/pt_insn.h new file mode 100644 index 0000000..452baf0 --- /dev/null +++ b/libipt/internal/include/pt_insn.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_INSN_H +#define PT_INSN_H + +#include + +#include "intel-pt.h" + +struct pt_insn_ext; + + +/* A finer-grain classification of instructions used internally. */ +typedef enum { + PTI_INST_INVALID, + + PTI_INST_CALL_9A, + PTI_INST_CALL_FFr3, + PTI_INST_CALL_FFr2, + PTI_INST_CALL_E8, + PTI_INST_INT, + + PTI_INST_INT3, + PTI_INST_INT1, + PTI_INST_INTO, + PTI_INST_IRET, /* includes IRETD and IRETQ (EOSZ determines) */ + + PTI_INST_JMP_E9, + PTI_INST_JMP_EB, + PTI_INST_JMP_EA, + PTI_INST_JMP_FFr5, /* REXW? */ + PTI_INST_JMP_FFr4, + PTI_INST_JCC, + PTI_INST_JrCXZ, + PTI_INST_LOOP, + PTI_INST_LOOPE, /* aka Z */ + PTI_INST_LOOPNE, /* aka NE */ + + PTI_INST_MOV_CR3, + + PTI_INST_RET_C3, + PTI_INST_RET_C2, + PTI_INST_RET_CB, + PTI_INST_RET_CA, + + PTI_INST_SYSCALL, + PTI_INST_SYSENTER, + PTI_INST_SYSEXIT, + PTI_INST_SYSRET, + + PTI_INST_VMLAUNCH, + PTI_INST_VMRESUME, + PTI_INST_VMCALL, + PTI_INST_VMPTRLD, + + PTI_INST_LAST +} pti_inst_enum_t; + +/* Information about an instruction we need internally in addition to the + * information provided in struct pt_insn. + */ +struct pt_insn_ext { + /* A more detailed instruction class. */ + pti_inst_enum_t iclass; + + /* Instruction-specific information. */ + union { + /* For branch instructions. */ + struct { + /* The branch displacement. + * + * This is only valid for direct calls/jumps. + * + * The displacement is applied to the address of the + * instruction following the branch. + */ + int32_t displacement; + + /* A flag saying whether the branch is direct. + * + * non-zero: direct + * zero: indirect + * + * This is expected to go away someday when we extend + * enum pt_insn_class to distinguish direct and indirect + * branches. + */ + uint8_t is_direct; + } branch; + } variant; +}; + + +/* Check if the instruction @insn/@iext changes the current privilege level. + * + * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL). + */ +extern int pt_insn_changes_cpl(const struct pt_insn *insn, + const struct pt_insn_ext *iext); + +/* Check if the instruction @insn/@iext changes CR3. + * + * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL). + */ +extern int pt_insn_changes_cr3(const struct pt_insn *insn, + const struct pt_insn_ext *iext); + +/* Check if the instruction @insn/@iext is a (near or far) branch. + * + * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL). 
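+ *
+ * For direct branches, the branch variant above allows computing the branch
+ * target without reading memory.  A minimal sketch (assuming @insn and @iext
+ * have already been filled in by pt_insn_decode()):
+ *
+ *   if (pt_insn_is_branch(&insn, &iext) &&
+ *       iext.variant.branch.is_direct) {
+ *           uint64_t target;
+ *
+ *           target = insn.ip + insn.size +
+ *                    (uint64_t) (int64_t) iext.variant.branch.displacement;
+ *   }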
+ */
+extern int pt_insn_is_branch(const struct pt_insn *insn,
+                             const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a far branch.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_far_branch(const struct pt_insn *insn,
+                                 const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a PIP packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_pip(const struct pt_insn *insn,
+                                const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a VMCS packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_vmcs(const struct pt_insn *insn,
+                                 const struct pt_insn_ext *iext);
+
+/* Determine the IP of the next instruction.
+ *
+ * Tries to determine the IP of the next instruction without using trace and
+ * provides it in @ip unless @ip is NULL.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_query if the IP can't be determined.
+ * Returns -pte_internal if @insn or @iext is NULL.
+ */
+extern int pt_insn_next_ip(uint64_t *ip, const struct pt_insn *insn,
+                           const struct pt_insn_ext *iext);
+
+/* Decode and analyze one instruction.
+ *
+ * Decodes the instruction at @insn->ip in @insn->mode into @insn and @iext.
+ *
+ * If the instruction cannot be decoded using a single memory read in a single
+ * section, sets @insn->truncated and reads the missing bytes from one or more
+ * other sections until either the instruction can be decoded or we're sure it
+ * is invalid.
+ *
+ * Returns the size in bytes on success, a negative error code otherwise.
+ * Returns -pte_bad_insn if the instruction could not be decoded.
+ */
+extern int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+                          struct pt_image *image, const struct pt_asid *asid);
+
+/* Determine if a range of instructions is contiguous.
+ *
+ * Try to proceed from IP @begin to IP @end in @asid without using trace.
+ *
+ * Returns a positive integer if we reach @end from @begin.
+ * Returns zero if we couldn't reach @end within @nsteps steps.
+ * Returns a negative error code otherwise.
+ */
+extern int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+                                       enum pt_exec_mode mode,
+                                       struct pt_image *image,
+                                       const struct pt_asid *asid,
+                                       size_t nsteps);
+
+#endif /* PT_INSN_H */
diff --git a/libipt/internal/include/pt_insn_decoder.h b/libipt/internal/include/pt_insn_decoder.h
new file mode 100644
index 0000000..c288028
--- /dev/null
+++ b/libipt/internal/include/pt_insn_decoder.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2013-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_INSN_DECODER_H +#define PT_INSN_DECODER_H + +#include "pt_query_decoder.h" +#include "pt_image.h" +#include "pt_retstack.h" + +#include + + +struct pt_insn_decoder { + /* The Intel(R) Processor Trace query decoder. */ + struct pt_query_decoder query; + + /* The configuration flags. + * + * Those are our flags set by the user. In @query.config.flags, we set + * the flags we need for the query decoder. + */ + struct pt_conf_flags flags; + + /* The default image. */ + struct pt_image default_image; + + /* The image. */ + struct pt_image *image; + + /* The current address space. */ + struct pt_asid asid; + + /* The current Intel(R) Processor Trace event. */ + struct pt_event event; + + /* The call/return stack for ret compression. */ + struct pt_retstack retstack; + + /* The current IP. */ + uint64_t ip; + + /* The IP of the last disable. + * + * This is either zero or the IP of the first instruction that wasn't + * executed due to the disable event. + */ + uint64_t last_disable_ip; + + /* The current execution mode. */ + enum pt_exec_mode mode; + + /* The status of the last successful decoder query. + * + * Errors are reported directly; the status is always a non-negative + * pt_status_flag bit-vector. + */ + int status; + + /* A collection of flags defining how to proceed flow reconstruction: + * + * - tracing is enabled. + */ + uint32_t enabled:1; + + /* - process @event. */ + uint32_t process_event:1; + + /* - event processing may change the IP. */ + uint32_t event_may_change_ip:1; + + /* - instructions are executed speculatively. */ + uint32_t speculative:1; + + /* - a paging event has been bound to the current instruction. */ + uint32_t paging_event_bound:1; + + /* - a vmcs event has been bound to the current instruction. */ + uint32_t vmcs_event_bound:1; +}; + + +/* Initialize an instruction flow decoder. + * + * Returns zero on success; a negative error code otherwise. + * Returns -pte_internal, if @decoder is NULL. + * Returns -pte_invalid, if @config is NULL. + */ +extern int pt_insn_decoder_init(struct pt_insn_decoder *decoder, + const struct pt_config *config); + +/* Finalize an instruction flow decoder. 
*/ +extern void pt_insn_decoder_fini(struct pt_insn_decoder *decoder); + +#endif /* PT_INSN_DECODER_H */ diff --git a/libipt/internal/include/pt_last_ip.h b/libipt/internal/include/pt_last_ip.h new file mode 100644 index 0000000..245d3db --- /dev/null +++ b/libipt/internal/include/pt_last_ip.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_LAST_IP_H +#define PT_LAST_IP_H + +#include + +struct pt_packet_ip; +struct pt_config; + + +/* Keeping track of the last-ip in Intel PT packets. */ +struct pt_last_ip { + /* The last IP. */ + uint64_t ip; + + /* Flags governing the handling of IP updates and queries: + * + * - we have seen an IP update. + */ + uint32_t have_ip:1; + /* - the IP has been suppressed in the last update. */ + uint32_t suppressed:1; +}; + + +/* Initialize (or reset) the last-ip. */ +extern void pt_last_ip_init(struct pt_last_ip *last_ip); + +/* Query the last-ip. + * + * If @ip is not NULL, provides the last-ip in @ip on success. + * + * Returns zero on success. + * Returns -pte_invalid if @last_ip is NULL. + * Returns -pte_noip if there is no last-ip. + * Returns -pte_ip_suppressed if the last-ip has been suppressed. + */ +extern int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip); + +/* Update last-ip. + * + * Updates @last_ip based on @packet and, if non-null, @config. + * + * Returns zero on success. + * Returns -pte_invalid if @last_ip or @packet is NULL. + * Returns -pte_bad_packet if @packet appears to be corrupted. 
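+ *
+ * A decoder typically feeds every IP packet into the last-ip tracker and
+ * queries the reconstructed address afterwards.  A minimal sketch (assuming
+ * @packet holds a decoded struct pt_packet_ip and @config is valid):
+ *
+ *   struct pt_last_ip last_ip;
+ *   uint64_t ip;
+ *   int errcode;
+ *
+ *   pt_last_ip_init(&last_ip);
+ *
+ *   errcode = pt_last_ip_update_ip(&last_ip, &packet, &config);
+ *   if (!errcode)
+ *           errcode = pt_last_ip_query(&ip, &last_ip);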
+ */ +extern int pt_last_ip_update_ip(struct pt_last_ip *last_ip, + const struct pt_packet_ip *packet, + const struct pt_config *config); + +#endif /* PT_LAST_IP_H */ diff --git a/libipt/internal/include/pt_mapped_section.h b/libipt/internal/include/pt_mapped_section.h new file mode 100644 index 0000000..7d7cfd1 --- /dev/null +++ b/libipt/internal/include/pt_mapped_section.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_MAPPED_SECTION_H +#define PT_MAPPED_SECTION_H + +#include "intel-pt.h" +#include "pt_section.h" + +#include + + +/* A section mapped into memory. */ +struct pt_mapped_section { + /* The section that is mapped. */ + struct pt_section *section; + + /* The address space into which the section is mapped. */ + struct pt_asid asid; + + /* The virtual address at which the section is mapped. */ + uint64_t vaddr; +}; + + +/* Initialize a mapped section - @section may be NULL. */ +static inline void pt_msec_init(struct pt_mapped_section *msec, + struct pt_section *section, + const struct pt_asid *asid, + uint64_t vaddr) +{ + if (!msec) + return; + + msec->section = section; + msec->vaddr = vaddr; + + if (asid) + msec->asid = *asid; + else + pt_asid_init(&msec->asid); +} + +/* Destroy a mapped section - does not free @msec->section. */ +static inline void pt_msec_fini(struct pt_mapped_section *msec) +{ + (void) msec; + + /* Nothing to do. */ +} + +/* Return the virtual address of the beginning of the memory region. */ +static inline uint64_t pt_msec_begin(const struct pt_mapped_section *msec) +{ + if (!msec) + return 0ull; + + return msec->vaddr; +} + +/* Return the virtual address one byte past the end of the memory region. */ +static inline uint64_t pt_msec_end(const struct pt_mapped_section *msec) +{ + uint64_t size; + + if (!msec) + return 0ull; + + size = pt_section_size(msec->section); + if (size) + size += msec->vaddr; + + return size; +} + +/* Return the underlying section. 
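+ *
+ * The accessors in this header are typically combined to translate a virtual
+ * address into a section offset before reading.  An illustrative sketch
+ * (assuming @msec is a valid mapped section and @vaddr an address to read):
+ *
+ *   if (pt_msec_begin(msec) <= vaddr && vaddr < pt_msec_end(msec)) {
+ *           uint64_t offset;
+ *
+ *           offset = pt_msec_unmap(msec, vaddr);
+ *   }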
*/ +static inline struct pt_section * +pt_msec_section(const struct pt_mapped_section *msec) +{ + return msec->section; +} + +/* Return an identifier for the address-space the section is mapped into. */ +static inline const struct pt_asid * +pt_msec_asid(const struct pt_mapped_section *msec) +{ + if (!msec) + return NULL; + + return &msec->asid; +} + +/* Translate a section/file offset into a virtual address. */ +static inline uint64_t pt_msec_map(const struct pt_mapped_section *msec, + uint64_t offset) +{ + return offset + msec->vaddr; +} + +/* Translate a virtual address into a section/file offset. */ +static inline uint64_t pt_msec_unmap(const struct pt_mapped_section *msec, + uint64_t vaddr) +{ + return vaddr - msec->vaddr; +} + +#endif /* PT_MAPPED_SECTION_H */ diff --git a/libipt/internal/include/pt_packet.h b/libipt/internal/include/pt_packet.h new file mode 100644 index 0000000..dd5b558 --- /dev/null +++ b/libipt/internal/include/pt_packet.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_PACKET_H +#define PT_PACKET_H + +#include + +struct pt_config; +struct pt_packet; +struct pt_packet_ip; +struct pt_packet_tnt; +struct pt_packet_pip; +struct pt_packet_mode; +struct pt_packet_tsc; +struct pt_packet_cbr; +struct pt_packet_tma; +struct pt_packet_mtc; +struct pt_packet_cyc; +struct pt_packet_vmcs; +struct pt_packet_mnt; + + +/* Read the payload of an Intel PT packet. + * + * Reads the payload of the packet starting at @pos into @packet. + * + * For pt_pkt_read_psb(), the @packet parameter is omitted; the function + * validates that the payload matches the expected PSB pattern. + * + * Decoding an unknown packet uses @config's decode callback. If the callback + * is NULL, pt_pkt_read_unknown() returns -pte_bad_opc. + * + * Beware that the packet opcode is not checked. The caller is responsible + * for checking the opcode and calling the correct packet read function. + * + * Returns the packet size on success, a negative error code otherwise. 
+ * Returns -pte_bad_packet if the packet payload is corrupt. + * Returns -pte_eos if the packet does not fit into the trace buffer. + * Returns -pte_internal if @packet, @pos, or @config is NULL. + */ +extern int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config); +extern int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos, + const struct pt_config *config); +extern int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos, + const struct pt_config *config); + +#endif /* PT_PACKET_H */ diff --git a/libipt/internal/include/pt_packet_decoder.h b/libipt/internal/include/pt_packet_decoder.h new file mode 100644 index 0000000..ff32587 --- /dev/null +++ b/libipt/internal/include/pt_packet_decoder.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_PACKET_DECODER_H +#define PT_PACKET_DECODER_H + +#include "intel-pt.h" + + +/* An Intel PT packet decoder. */ +struct pt_packet_decoder { + /* The decoder configuration. */ + struct pt_config config; + + /* The current position in the trace buffer. */ + const uint8_t *pos; + + /* The position of the last PSB packet. */ + const uint8_t *sync; +}; + + +/* Initialize the packet decoder. + * + * Returns zero on success, a negative error code otherwise. + */ +extern int pt_pkt_decoder_init(struct pt_packet_decoder *, + const struct pt_config *); + +/* Finalize the packet decoder. */ +extern void pt_pkt_decoder_fini(struct pt_packet_decoder *); + + +/* Decoder functions for the packet decoder. */ +extern int pt_pkt_decode_unknown(struct pt_packet_decoder *, + struct pt_packet *); +extern int pt_pkt_decode_pad(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_psb(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_tip(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_tnt_8(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_tnt_64(struct pt_packet_decoder *, + struct pt_packet *); +extern int pt_pkt_decode_tip_pge(struct pt_packet_decoder *, + struct pt_packet *); +extern int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *, + struct pt_packet *); +extern int pt_pkt_decode_fup(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_pip(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_ovf(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_mode(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_psbend(struct pt_packet_decoder *, + struct pt_packet *); +extern int pt_pkt_decode_tsc(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_cbr(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_tma(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_mtc(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_cyc(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_stop(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_vmcs(struct pt_packet_decoder *, struct pt_packet *); +extern int pt_pkt_decode_mnt(struct pt_packet_decoder *, struct pt_packet *); + +#endif /* PT_PACKET_DECODER_H */ diff --git a/libipt/internal/include/pt_query_decoder.h b/libipt/internal/include/pt_query_decoder.h new file mode 100644 index 0000000..491c3bc --- /dev/null +++ b/libipt/internal/include/pt_query_decoder.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following 
disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_QUERY_DECODER_H +#define PT_QUERY_DECODER_H + +#include "pt_last_ip.h" +#include "pt_tnt_cache.h" +#include "pt_time.h" +#include "pt_event_queue.h" + +#include "intel-pt.h" + +struct pt_decoder_function; + + +/* An Intel PT query decoder. */ +struct pt_query_decoder { + /* The decoder configuration. */ + struct pt_config config; + + /* The current position in the trace buffer. */ + const uint8_t *pos; + + /* The position of the last PSB packet. */ + const uint8_t *sync; + + /* The decoding function for the next packet. */ + const struct pt_decoder_function *next; + + /* The last-ip. */ + struct pt_last_ip ip; + + /* The cached tnt indicators. */ + struct pt_tnt_cache tnt; + + /* Timing information. */ + struct pt_time time; + + /* Timing calibration. */ + struct pt_time_cal tcal; + + /* Pending (incomplete) events. */ + struct pt_event_queue evq; + + /* The current event. */ + struct pt_event *event; + + /* A collection of flags relevant for decoding: + * + * - tracing is enabled. + */ + uint32_t enabled:1; + + /* - consume the current packet. */ + uint32_t consume_packet:1; +}; + +/* Initialize the query decoder. + * + * Returns zero on success, a negative error code otherwise. + */ +extern int pt_qry_decoder_init(struct pt_query_decoder *, + const struct pt_config *); + +/* Finalize the query decoder. */ +extern void pt_qry_decoder_fini(struct pt_query_decoder *); + +/* Decoder functions (tracing context). 
*/ +extern int pt_qry_decode_unknown(struct pt_query_decoder *); +extern int pt_qry_decode_pad(struct pt_query_decoder *); +extern int pt_qry_decode_psb(struct pt_query_decoder *); +extern int pt_qry_decode_tip(struct pt_query_decoder *); +extern int pt_qry_decode_tnt_8(struct pt_query_decoder *); +extern int pt_qry_decode_tnt_64(struct pt_query_decoder *); +extern int pt_qry_decode_tip_pge(struct pt_query_decoder *); +extern int pt_qry_decode_tip_pgd(struct pt_query_decoder *); +extern int pt_qry_decode_fup(struct pt_query_decoder *); +extern int pt_qry_decode_pip(struct pt_query_decoder *); +extern int pt_qry_decode_ovf(struct pt_query_decoder *); +extern int pt_qry_decode_mode(struct pt_query_decoder *); +extern int pt_qry_decode_psbend(struct pt_query_decoder *); +extern int pt_qry_decode_tsc(struct pt_query_decoder *); +extern int pt_qry_header_tsc(struct pt_query_decoder *); +extern int pt_qry_decode_cbr(struct pt_query_decoder *); +extern int pt_qry_header_cbr(struct pt_query_decoder *); +extern int pt_qry_decode_tma(struct pt_query_decoder *); +extern int pt_qry_decode_mtc(struct pt_query_decoder *); +extern int pt_qry_decode_cyc(struct pt_query_decoder *); +extern int pt_qry_decode_stop(struct pt_query_decoder *); +extern int pt_qry_decode_vmcs(struct pt_query_decoder *); +extern int pt_qry_decode_mnt(struct pt_query_decoder *); + +/* Decoder functions (header context). */ +extern int pt_qry_header_fup(struct pt_query_decoder *); +extern int pt_qry_header_pip(struct pt_query_decoder *); +extern int pt_qry_header_mode(struct pt_query_decoder *); +extern int pt_qry_header_vmcs(struct pt_query_decoder *); + +#endif /* PT_QUERY_DECODER_H */ diff --git a/libipt/internal/include/pt_retstack.h b/libipt/internal/include/pt_retstack.h new file mode 100644 index 0000000..0d72d50 --- /dev/null +++ b/libipt/internal/include/pt_retstack.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
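As a rough illustration of how the query decoder above is brought up, here is a sketch under assumptions (not part of the patch): it presumes the usual size/begin/end members of struct pt_config from intel-pt.h and requires <string.h> for memset:

/* Illustrative only: initialize a query decoder over a raw trace buffer. */
static int setup_query_decoder(struct pt_query_decoder *decoder,
			       uint8_t *begin, uint8_t *end)
{
	struct pt_config config;
	int errcode;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.begin = begin;
	config.end = end;

	errcode = pt_qry_decoder_init(decoder, &config);
	if (errcode < 0)
		return errcode;

	/* ... sync and decode; the pt_qry_decode_* handlers above are
	 * normally dispatched through the decoder function table ...
	 */

	pt_qry_decoder_fini(decoder);
	return 0;
}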
+ */ + +#ifndef PT_RETSTACK_H +#define PT_RETSTACK_H + +#include + + +/* The size of the call/return stack in number of entries. */ +enum { + pt_retstack_size = 64 +}; + +/* A stack of return addresses used for return compression. */ +struct pt_retstack { + /* The stack of return addresses. + * + * We use one additional entry in order to distinguish a full from + * an empty stack. + */ + uint64_t stack[pt_retstack_size + 1]; + + /* The top of the stack. */ + uint8_t top; + + /* The bottom of the stack. */ + uint8_t bottom; +}; + +/* Initialize (or reset) a call/return stack. */ +extern void pt_retstack_init(struct pt_retstack *); + +/* Test a call/return stack for emptiness. + * + * Returns zero if @retstack contains at least one element. + * Returns a positive integer if @retstack is empty. + * Returns -pte_invalid if @retstack is NULL. + */ +extern int pt_retstack_is_empty(const struct pt_retstack *retstack); + +/* Pop and return the topmost IP. + * + * If @ip is not NULL, provides the topmost return address on success. + * If @retstack is not empty, pops the topmost return address on success. + * + * Returns zero on success. + * Returns -pte_invalid if @retstack is NULL. + * Returns -pte_noip if @retstack is empty. + */ +extern int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip); + +/* Push a return address onto the stack. + * + * Pushes @ip onto @retstack. + * If @retstack is full, drops the oldest return address. + * + * Returns zero on success. + */ +extern int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip); + +#endif /* PT_RETSTACK_H */ diff --git a/libipt/internal/include/pt_section.h b/libipt/internal/include/pt_section.h new file mode 100644 index 0000000..58fb241 --- /dev/null +++ b/libipt/internal/include/pt_section.h @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
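A minimal sketch of how the return-compression stack above is typically used (illustrative only, not part of the patch): push the address of the instruction following a near call, pop it back when a compressed return is indicated.

/* Illustrative only: maintain the stack while following the trace. */
static int on_near_call(struct pt_retstack *retstack, uint64_t next_ip)
{
	/* If the stack is full, the oldest entry is silently dropped. */
	return pt_retstack_push(retstack, next_ip);
}

static int on_compressed_return(struct pt_retstack *retstack, uint64_t *ip)
{
	/* Returns -pte_noip if the stack is empty. */
	return pt_retstack_pop(retstack, ip);
}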
+ */ + +#ifndef PT_SECTION_H +#define PT_SECTION_H + +#include +#include + +#if defined(FEATURE_THREADS) +# include +#endif /* defined(FEATURE_THREADS) */ + +struct pt_block_cache; + + +/* A section of contiguous memory loaded from a file. */ +struct pt_section { + /* The name of the file. */ + char *filename; + + /* The offset into the file. */ + uint64_t offset; + + /* The (adjusted) size in bytes. The size is truncated to match the + * actual file size. + */ + uint64_t size; + + /* A pointer to OS-specific file status for detecting changes. + * + * The status is initialized on first pt_section_map() and will be + * left in the section until the section is destroyed. This field + * is owned by the OS-specific mmap-based section implementation. + */ + void *status; + + /* A pointer to implementation-specific mapping information - NULL if + * the section is currently not mapped. + * + * This field is set in pt_section_map() and owned by the mapping + * implementation. + */ + void *mapping; + + /* A pointer to an optional block cache. + * + * The cache is created and destroyed implicitly when the section is + * mapped and unmapped respectively. + */ + struct pt_block_cache *bcache; + + /* A pointer to the unmap function - NULL if the section is currently + * not mapped. + * + * This field is set in pt_section_map() and owned by the mapping + * implementation. + */ + int (*unmap)(struct pt_section *sec); + + /* A pointer to the read function - NULL if the section is currently + * not mapped. + * + * This field is set in pt_section_map() and owned by the mapping + * implementation. + */ + int (*read)(const struct pt_section *sec, uint8_t *buffer, + uint16_t size, uint64_t offset); + +#if defined(FEATURE_THREADS) + /* A lock protecting this section. + * + * Most operations do not require the section to be locked. All + * actual locking should be handled by pt_section_* functions. + */ + mtx_t lock; +#endif /* defined(FEATURE_THREADS) */ + + /* The number of current users. The last user destroys the section. */ + uint16_t ucount; + + /* The number of current mappers. The last unmaps the section. */ + uint16_t mcount; + + /* A collection of flags to: + * + * - disable block caching. + */ + uint32_t disable_bcache:1; +}; + +/* Create a section. + * + * The returned section describes the contents of @file starting at @offset + * for @size bytes. + * + * If @file is shorter than the requested @size, the section is silently + * truncated to the size of @file. + * + * If @offset lies beyond the end of @file, no section is created. + * + * The returned section is not mapped and starts with a user count of one and + * instruction caching enabled. + * + * Returns a new section on success, NULL otherwise. + */ +extern struct pt_section *pt_mk_section(const char *file, uint64_t offset, + uint64_t size); + +/* Clone (parts of) a section. + * + * The cloned section describes the same content as @section but starting at + * @offset for @size bytes. The cloned range must lie within @section. + * + * The cloned section is not mapped and starts with a user count of one. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @clone or @section is NULL. + * Returns -pte_internal if the cloned range lies outside of @section. + */ +extern int pt_section_clone(struct pt_section **clone, + const struct pt_section *section, uint64_t offset, + uint64_t size); + +/* Lock a section. + * + * Locks @section. The section must not be locked. 
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_lock(struct pt_section *section);
+
+/* Unlock a section.
+ *
+ * Unlocks @section. The section must be locked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_unlock(struct pt_section *section);
+
+/* Add another user.
+ *
+ * Increments the user count of @section.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if the user count would overflow.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_get(struct pt_section *section);
+
+/* Remove a user.
+ *
+ * Decrements the user count of @section. Destroys the section if the
+ * count reaches zero.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if the user count is already zero.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_put(struct pt_section *section);
+
+/* Return the filename of @section. */
+extern const char *pt_section_filename(const struct pt_section *section);
+
+/* Return the offset of the section in bytes. */
+extern uint64_t pt_section_offset(const struct pt_section *section);
+
+/* Return the size of the section in bytes. */
+extern uint64_t pt_section_size(const struct pt_section *section);
+
+/* Return @section's block cache, if available.
+ *
+ * @section must be mapped.
+ *
+ * The cache, if available, is implicitly created when the section is mapped and
+ * implicitly destroyed when the section is unmapped.
+ *
+ * The cache is not use-counted. It is only valid as long as the caller keeps
+ * @section mapped.
+ */
+static inline struct pt_block_cache *
+pt_section_bcache(const struct pt_section *section)
+{
+	if (!section)
+		return NULL;
+
+	return section->bcache;
+}
+
+/* Enable block caching. */
+static inline void pt_section_enable_bcache(struct pt_section *section)
+{
+	section->disable_bcache = 0;
+}
+
+/* Disable block caching. */
+static inline void pt_section_disable_bcache(struct pt_section *section)
+{
+	section->disable_bcache = 1;
+}
+
+/* Create the OS-specific file status.
+ *
+ * On success, allocates a status object, provides a pointer to it in @pstatus
+ * and provides the file size in @psize.
+ *
+ * The status object will be free()'ed when its section is.
+ *
+ * This function is implemented in the OS-specific section implementation.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @pstatus, @psize, or @filename is NULL.
+ * Returns -pte_bad_image if @filename can't be opened.
+ * Returns -pte_nomem if the status object can't be allocated.
+ */
+extern int pt_section_mk_status(void **pstatus, uint64_t *psize,
+				const char *filename);
+
+/* Set up a block cache.
+ *
+ * This function is called from the OS-specific implementation when the section
+ * is mapped. Do not call this function directly.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section already has an instruction cache.
+ */
+extern int pt_section_add_bcache(struct pt_section *section);
+
+/* Map a section.
+ *
+ * Maps @section into memory. Mappings are use-counted.
The number of + * pt_section_map() calls must match the number of pt_section_unmap() + * calls. + * + * This function is implemented in the OS-specific section implementation. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_bad_image if @section changed or can't be opened. + * Returns -pte_bad_lock on any locking error. + * Returns -pte_nomem if @section can't be mapped into memory. + */ +extern int pt_section_map(struct pt_section *section); + +/* Unmap a section. + * + * Unmaps @section from memory. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_bad_lock on any locking error. + * Returns -pte_internal if @section has not been mapped. + */ +extern int pt_section_unmap(struct pt_section *section); + +/* Read memory from a section. + * + * Reads at most @size bytes from @section at @offset into @buffer. @section + * must be mapped. + * + * Returns the number of bytes read on success, a negative error code otherwise. + * Returns -pte_internal if @section or @buffer are NULL. + * Returns -pte_nomap if @offset is beyond the end of the section. + */ +extern int pt_section_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset); + +#endif /* PT_SECTION_H */ diff --git a/libipt/internal/include/pt_section_file.h b/libipt/internal/include/pt_section_file.h new file mode 100644 index 0000000..380dabd --- /dev/null +++ b/libipt/internal/include/pt_section_file.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_SECTION_FILE_H +#define PT_SECTION_FILE_H + +#include +#include + +#if defined(FEATURE_THREADS) +# include +#endif /* defined(FEATURE_THREADS) */ + +struct pt_section; + + +/* File-based section mapping information. */ +struct pt_sec_file_mapping { + /* The FILE pointer. */ + FILE *file; + + /* The begin and end of the section as offset into @file. 
*/ + long begin, end; + +#if defined(FEATURE_THREADS) + /* A lock protecting read access to this file. + * + * Since we need to first set the file position indication before + * we can read, there's a race on the file position. + */ + mtx_t lock; +#endif /* defined(FEATURE_THREADS) */ +}; + + +/* Map a section based on file operations. + * + * The caller has already opened the file for reading. + * + * On success, sets @section's mapping, unmap, and read pointers. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section or @file are NULL. + * Returns -pte_invalid if @section can't be mapped. + */ +extern int pt_sec_file_map(struct pt_section *section, FILE *file); + +/* Unmap a section based on file operations. + * + * On success, clears @section's mapping, unmap, and read pointers. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_internal if @section has not been mapped. + */ +extern int pt_sec_file_unmap(struct pt_section *section); + +/* Read memory from a file based section. + * + * Reads at most @size bytes from @section at @offset into @buffer. + * + * Returns the number of bytes read on success, a negative error code otherwise. + * Returns -pte_invalid if @section or @buffer are NULL. + * Returns -pte_nomap if @offset is beyond the end of the section. + */ +extern int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset); + +#endif /* PT_SECTION_FILE_H */ diff --git a/libipt/internal/include/pt_sync.h b/libipt/internal/include/pt_sync.h new file mode 100644 index 0000000..2286133 --- /dev/null +++ b/libipt/internal/include/pt_sync.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_SYNC_H +#define PT_SYNC_H + +#include + +struct pt_config; + + +/* Synchronize onto the trace stream. 
+ * + * Search for the next synchronization point in forward or backward direction + * starting at @pos using the trace configuration @config. + * + * On success, stores a pointer to the next synchronization point in @sync. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_internal if @sync, @pos, or @config is NULL. + * Returns -pte_nosync if @pos lies outside of @config's buffer. + * Returns -pte_eos if no further synchronization point is found. + */ +extern int pt_sync_forward(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config); +extern int pt_sync_backward(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config); + +/* Manually synchronize onto the trace stream. + * + * Validate that @pos is within the bounds of @config's trace buffer and that + * there is a synchronization point at @pos. + * + * On success, stores @pos in @sync. + * + * Returns zero on success, a negative error code otherwise. + * + * Returns -pte_eos if @pos is outside of @config's trace buffer. + * Returns -pte_internal if @sync, @pos, or @config is NULL. + * Returns -pte_bad_packet if there is no PSB at @pos. + */ +extern int pt_sync_set(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config); + +#endif /* PT_SYNC_H */ diff --git a/libipt/internal/include/pt_time.h b/libipt/internal/include/pt_time.h new file mode 100644 index 0000000..7367b10 --- /dev/null +++ b/libipt/internal/include/pt_time.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_TIME_H +#define PT_TIME_H + +#include + +struct pt_config; +struct pt_packet_tsc; +struct pt_packet_cbr; +struct pt_packet_tma; +struct pt_packet_mtc; +struct pt_packet_cyc; + + +/* Intel(R) Processor Trace timing. */ +struct pt_time { + /* The estimated Time Stamp Count. */ + uint64_t tsc; + + /* The base Time Stamp Count (from TSC and MTC). */ + uint64_t base; + + /* The estimated Fast Counter. 
*/ + uint64_t fc; + + /* The adjusted last CTC value (from MTC and TMA). */ + uint32_t ctc; + + /* The adjusted CTC value when @fc was cleared (from MTC and TMA). */ + uint32_t ctc_cyc; + + /* The number of lost MTC updates. */ + uint32_t lost_mtc; + + /* The number of lost CYC updates. */ + uint32_t lost_cyc; + + /* The core:bus ratio. */ + uint8_t cbr; + + /* A flag saying whether we have seen a TSC packet. */ + uint32_t have_tsc:1; + + /* A flag saying whether we have seen a CBR packet. */ + uint32_t have_cbr:1; + + /* A flag saying whether we have seen a TMA packet. */ + uint32_t have_tma:1; + + /* A flag saying whether we have seen a MTC packet. */ + uint32_t have_mtc:1; +}; + +/* Initialize (or reset) the time. */ +extern void pt_time_init(struct pt_time *time); + +/* Query the current time. + * + * Provides the estimated Time Stamp Count value in @tsc. + * + * If @lost_mtc is not NULL, provides the number of lost MTC packets. + * If @lost_cyc is not NULL, provides the number of lost CYC packets. + * + * Returns zero on success; a negative error code, otherwise. + * Returns -pte_internal if @tsc or @time is NULL. + * Returns -pte_no_time if there has not been a TSC packet. + */ +extern int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc, + uint32_t *lost_cyc, const struct pt_time *time); + +/* Query the current core:bus ratio. + * + * Provides the core:bus ratio in @cbr. + * + * Returns zero on success; a negative error code, otherwise. + * Returns -pte_internal if @cbr or @time is NULL. + * Returns -pte_no_cbr if there has not been a CBR packet. + */ +extern int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time); + +/* Update the time based on an Intel PT packet. + * + * Returns zero on success. + * Returns a negative error code, otherwise. + */ +extern int pt_time_update_tsc(struct pt_time *, const struct pt_packet_tsc *, + const struct pt_config *); +extern int pt_time_update_cbr(struct pt_time *, const struct pt_packet_cbr *, + const struct pt_config *); +extern int pt_time_update_tma(struct pt_time *, const struct pt_packet_tma *, + const struct pt_config *); +extern int pt_time_update_mtc(struct pt_time *, const struct pt_packet_mtc *, + const struct pt_config *); +/* @fcr is the fast-counter:cycles ratio obtained by calibration. */ +extern int pt_time_update_cyc(struct pt_time *, const struct pt_packet_cyc *, + const struct pt_config *, uint64_t fcr); + + +/* Timing calibration. + * + * Used for estimating the Fast-Counter:Cycles ratio. + * + * Ideally, we calibrate by counting CYCs between MTCs. Lacking MTCs, we + * use TSC, instead. + */ +struct pt_time_cal { + /* The estimated fast-counter:cycles ratio. */ + uint64_t fcr; + + /* The minimal and maximal @fcr values. */ + uint64_t min_fcr, max_fcr; + + /* The last TSC value. + * + * Used for calibrating at TSC. + */ + uint64_t tsc; + + /* The number of cycles since the last TSC (from CYC). + * + * Used for calibrating at TSC. + */ + uint64_t cyc_tsc; + + /* The number of cycles since the last MTC (from CYC). + * + * Used for calibrating at MTC. + */ + uint64_t cyc_mtc; + + /* The adjusted last CTC value (from MTC). + * + * Used for calibrating at MTC. + */ + uint32_t ctc; + + /* The number of lost MTC updates since the last successful update. */ + uint32_t lost_mtc; + + /* A flag saying whether we have seen a MTC packet. */ + uint32_t have_mtc:1; +}; + +enum { + /* The amount by which the fcr value is right-shifted. 
+ *
+ * Do not shift the value obtained by pt_tcal_fcr() when passing it to
+ * pt_time_update_cyc().
+ * Do shift the value passed to pt_tcal_set_fcr().
+ */
+	pt_tcal_fcr_shr = 8
+};
+
+/* Initialize (or reset) timing calibration. */
+extern void pt_tcal_init(struct pt_time_cal *tcal);
+
+/* Query the estimated fast-counter:cycles ratio.
+ *
+ * Provides the estimated ratio in @fcr unless -pte_internal or
+ * -pte_no_time is returned.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @fcr or @tcal is NULL.
+ * Returns -pte_no_time if no information is available.
+ */
+extern int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal);
+
+/* Set the fast-counter:cycles ratio.
+ *
+ * Timing calibration takes one CBR or two MTC packets before it can provide
+ * first estimations. Use this to supply an initial value to be used in the
+ * meantime.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @tcal is NULL.
+ */
+extern int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr);
+
+/* Update calibration based on an Intel PT packet.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_tcal_update_tsc(struct pt_time_cal *,
+			      const struct pt_packet_tsc *,
+			      const struct pt_config *);
+extern int pt_tcal_header_tsc(struct pt_time_cal *,
+			      const struct pt_packet_tsc *,
+			      const struct pt_config *);
+extern int pt_tcal_update_cbr(struct pt_time_cal *,
+			      const struct pt_packet_cbr *,
+			      const struct pt_config *);
+extern int pt_tcal_header_cbr(struct pt_time_cal *,
+			      const struct pt_packet_cbr *,
+			      const struct pt_config *);
+extern int pt_tcal_update_tma(struct pt_time_cal *,
+			      const struct pt_packet_tma *,
+			      const struct pt_config *);
+extern int pt_tcal_update_mtc(struct pt_time_cal *,
+			      const struct pt_packet_mtc *,
+			      const struct pt_config *);
+extern int pt_tcal_update_cyc(struct pt_time_cal *,
+			      const struct pt_packet_cyc *,
+			      const struct pt_config *);
+
+#endif /* PT_TIME_H */
diff --git a/libipt/internal/include/pt_tnt_cache.h b/libipt/internal/include/pt_tnt_cache.h
new file mode 100644
index 0000000..5cf91e2
--- /dev/null
+++ b/libipt/internal/include/pt_tnt_cache.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
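To make the fixed-point convention above concrete, here is an illustrative sketch (not part of the patch): a seed ratio is shifted left by pt_tcal_fcr_shr before being passed to pt_tcal_set_fcr(), while the value obtained from pt_tcal_fcr() is handed to pt_time_update_cyc() unshifted.

/* Illustrative only: seed calibration with a 1:1 ratio. */
static int seed_calibration(struct pt_time_cal *tcal)
{
	pt_tcal_init(tcal);
	return pt_tcal_set_fcr(tcal, 1ull << pt_tcal_fcr_shr);
}

/* Illustrative only: apply a CYC packet using the current estimate. */
static int apply_cyc(struct pt_time *time, const struct pt_time_cal *tcal,
		     const struct pt_packet_cyc *packet,
		     const struct pt_config *config)
{
	uint64_t fcr;
	int errcode;

	errcode = pt_tcal_fcr(&fcr, tcal);
	if (errcode < 0)
		return errcode;	/* -pte_no_time until calibrated or seeded */

	return pt_time_update_cyc(time, packet, config, fcr);
}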
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_TNT_CACHE_H +#define PT_TNT_CACHE_H + +#include + +struct pt_packet_tnt; +struct pt_config; + + +/* Keeping track of tnt indicators. */ +struct pt_tnt_cache { + /* The last tnt. */ + uint64_t tnt; + + /* The index into the above tnt. + * + * (tnt & index) gives the current tnt entry. + * (index >>= 1) moves the index to the next tnt entry. + * (index == 0) means that the current tnt is empty. + */ + uint64_t index; +}; + + +/* Initialize (or reset) the tnt cache. */ +extern void pt_tnt_cache_init(struct pt_tnt_cache *cache); + +/* Check if the tnt cache is empty. + * + * Returns 0 if the tnt cache is not empty. + * Returns > 0 if the tnt cache is empty. + * Returns -pte_invalid if @cache is NULL. + */ +extern int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache); + +/* Query the next tnt indicator. + * + * This consumes the returned tnt indicator in the cache. + * + * Returns 0 if the next branch is not taken. + * Returns > 0 if the next branch is taken. + * Returns -pte_invalid if @cache is NULL. + * Returns -pte_bad_query if there is no tnt cached. + */ +extern int pt_tnt_cache_query(struct pt_tnt_cache *cache); + +/* Update the tnt cache based on Intel PT packets. + * + * Updates @cache based on @packet and, if non-null, @config. + * + * Returns zero on success. + * Returns -pte_invalid if @cache or @packet is NULL. + * Returns -pte_bad_packet if @packet appears to be corrupted. + * Returns -pte_bad_context if the tnt cache is not empty. + */ +extern int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache, + const struct pt_packet_tnt *packet, + const struct pt_config *config); + +#endif /* PT_TNT_CACHE_H */ diff --git a/libipt/internal/include/pti-disp-defs.h b/libipt/internal/include/pti-disp-defs.h new file mode 100644 index 0000000..f0a843b --- /dev/null +++ b/libipt/internal/include/pti-disp-defs.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
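An illustrative sketch (not part of the patch) of the fill-and-drain pattern the tnt cache above supports:

/* Illustrative only: cache a TNT packet, then consume one bit per branch. */
static int drain_tnt(struct pt_tnt_cache *cache,
		     const struct pt_packet_tnt *packet,
		     const struct pt_config *config)
{
	int errcode, taken;

	errcode = pt_tnt_cache_update_tnt(cache, packet, config);
	if (errcode < 0)
		return errcode;	/* -pte_bad_context if bits are still pending */

	while (!pt_tnt_cache_is_empty(cache)) {
		taken = pt_tnt_cache_query(cache);
		if (taken < 0)
			return taken;

		/* taken != 0: branch taken; taken == 0: branch not taken. */
	}

	return 0;
}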
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if !defined(PTI_DISP_DEFS_H) +#define PTI_DISP_DEFS_H + +#define PTI_DISP_NONE 0 +#define PTI_PRESERVE_DEFAULT 1 +#define PTI_BRDISP8 2 +#define PTI_DISP_BUCKET_0_l1 3 +#define PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2 4 +#define PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2 5 +#define PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1 6 +#endif diff --git a/libipt/internal/include/pti-disp.h b/libipt/internal/include/pti-disp.h new file mode 100644 index 0000000..264061f --- /dev/null +++ b/libipt/internal/include/pti-disp.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +static uint8_t disp_bytes_map_0x0[256] = { +/*opcode 0x0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf*/ 0, +/*opcode 0x10*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x11*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x12*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x13*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x14*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x15*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x16*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x17*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x18*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x19*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x20*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x21*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x22*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x23*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x24*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x25*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x26*/ 0, +/*opcode 0x27*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x28*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x29*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2e*/ 0, +/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x30*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x31*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x32*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x33*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x34*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x35*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x36*/ 0, +/*opcode 0x37*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x38*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x39*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3e*/ 0, +/*opcode 0x3f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x40*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x41*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x42*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x43*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x44*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x45*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x46*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x47*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x48*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x49*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x50*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x51*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x52*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x53*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x54*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x55*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x56*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x57*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x58*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x59*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5c*/ 
PTI_PRESERVE_DEFAULT, +/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x60*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x61*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x62*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x63*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x64*/ 0, +/*opcode 0x65*/ 0, +/*opcode 0x66*/ 0, +/*opcode 0x67*/ 0, +/*opcode 0x68*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x69*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x70*/ PTI_BRDISP8, +/*opcode 0x71*/ PTI_BRDISP8, +/*opcode 0x72*/ PTI_BRDISP8, +/*opcode 0x73*/ PTI_BRDISP8, +/*opcode 0x74*/ PTI_BRDISP8, +/*opcode 0x75*/ PTI_BRDISP8, +/*opcode 0x76*/ PTI_BRDISP8, +/*opcode 0x77*/ PTI_BRDISP8, +/*opcode 0x78*/ PTI_BRDISP8, +/*opcode 0x79*/ PTI_BRDISP8, +/*opcode 0x7a*/ PTI_BRDISP8, +/*opcode 0x7b*/ PTI_BRDISP8, +/*opcode 0x7c*/ PTI_BRDISP8, +/*opcode 0x7d*/ PTI_BRDISP8, +/*opcode 0x7e*/ PTI_BRDISP8, +/*opcode 0x7f*/ PTI_BRDISP8, +/*opcode 0x80*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x81*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x82*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x83*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x84*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x85*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x86*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x87*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x88*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x89*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x90*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x91*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x92*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x93*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x94*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x95*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x96*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x97*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x98*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x99*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9a*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa0*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2, +/*opcode 0xa1*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2, +/*opcode 0xa2*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2, +/*opcode 0xa3*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2, +/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xab*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xac*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xad*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xae*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb9*/ PTI_PRESERVE_DEFAULT, +/*opcode 
0xba*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc7*/ PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1, +/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xca*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xce*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xda*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xde*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe0*/ PTI_BRDISP8, +/*opcode 0xe1*/ PTI_BRDISP8, +/*opcode 0xe2*/ PTI_BRDISP8, +/*opcode 0xe3*/ PTI_BRDISP8, +/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe8*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0xe9*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0xea*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xeb*/ PTI_BRDISP8, +/*opcode 0xec*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xed*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xee*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xef*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf0*/ 0, +/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf2*/ 0, +/*opcode 0xf3*/ 0, +/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xff*/ PTI_PRESERVE_DEFAULT, +}; +static uint8_t disp_bytes_map_0x0F[256] = { +/*opcode 0x0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4*/ 0, +/*opcode 0x5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa*/ 0, +/*opcode 0xb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc*/ 0, +/*opcode 0xd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf*/ 0, +/*opcode 0x10*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x11*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x12*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x13*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x14*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x15*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x16*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x17*/ PTI_PRESERVE_DEFAULT, 
+/*opcode 0x18*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x19*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x20*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x21*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x22*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x23*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x24*/ 0, +/*opcode 0x25*/ 0, +/*opcode 0x26*/ 0, +/*opcode 0x27*/ 0, +/*opcode 0x28*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x29*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x30*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x31*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x32*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x33*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x34*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x35*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x36*/ 0, +/*opcode 0x37*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x38*/ 0, +/*opcode 0x39*/ 0, +/*opcode 0x3a*/ 0, +/*opcode 0x3b*/ 0, +/*opcode 0x3c*/ 0, +/*opcode 0x3d*/ 0, +/*opcode 0x3e*/ 0, +/*opcode 0x3f*/ 0, +/*opcode 0x40*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x41*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x42*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x43*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x44*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x45*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x46*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x47*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x48*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x49*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x50*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x51*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x52*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x53*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x54*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x55*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x56*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x57*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x58*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x59*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x60*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x61*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x62*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x63*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x64*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x65*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x66*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x67*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x68*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x69*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x70*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x71*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x72*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x73*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x74*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x75*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x76*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x77*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x78*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x79*/ 
PTI_PRESERVE_DEFAULT, +/*opcode 0x7a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x7f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x80*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x81*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x82*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x83*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x84*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x85*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x86*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x87*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x88*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x89*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8a*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8b*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8c*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8d*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8e*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x8f*/ PTI_DISP_BUCKET_0_l1, +/*opcode 0x90*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x91*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x92*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x93*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x94*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x95*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x96*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x97*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x98*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x99*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9a*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT, +/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa6*/ 0, +/*opcode 0xa7*/ 0, +/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xab*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xac*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xad*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xae*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xb9*/ 0, +/*opcode 0xba*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xca*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xce*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT, 
+/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xda*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xde*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xe9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xea*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xeb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xec*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xed*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xee*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xef*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf0*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf2*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf3*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT, +/*opcode 0xff*/ 0, +}; diff --git a/libipt/internal/include/pti-imm-defs.h b/libipt/internal/include/pti-imm-defs.h new file mode 100644 index 0000000..888e703 --- /dev/null +++ b/libipt/internal/include/pti-imm-defs.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#if !defined(PTI_IMM_DEFS_H) +#define PTI_IMM_DEFS_H + +#define PTI_IMM_NONE 0 +#define PTI_0_IMM_WIDTH_CONST_l2 1 +#define PTI_UIMM8_IMM_WIDTH_CONST_l2 2 +#define PTI_SIMM8_IMM_WIDTH_CONST_l2 3 +#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 4 +#define PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 5 +#define PTI_UIMM16_IMM_WIDTH_CONST_l2 6 +#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1 7 +#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1 8 +#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2 9 +#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1 10 +#define PTI_IMM_hasimm_map0x0_op0xc8_l1 11 +#define PTI_IMM_hasimm_map0x0F_op0x78_l1 12 + +#endif diff --git a/libipt/internal/include/pti-imm.h b/libipt/internal/include/pti-imm.h new file mode 100644 index 0000000..ebca50e --- /dev/null +++ b/libipt/internal/include/pti-imm.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +static uint8_t imm_bytes_map_0x0[256] = { +/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x5*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xd*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf*/ 0, +/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x14*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x15*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x1d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x24*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x25*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x26*/ 0, +/*opcode 0x27*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x2d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x2e*/ 0, +/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x34*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x35*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x36*/ 0, +/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x38*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x39*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x3a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x3b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x3c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x3d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x3e*/ 0, +/*opcode 0x3f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2, 
+/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x64*/ 0, +/*opcode 0x65*/ 0, +/*opcode 0x66*/ 0, +/*opcode 0x67*/ 0, +/*opcode 0x68*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2, +/*opcode 0x69*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x6a*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x6b*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x70*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x71*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x72*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x73*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x78*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x80*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x81*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0x82*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x83*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9a*/ PTI_UIMM16_IMM_WIDTH_CONST_l2, +/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2, 
+/*opcode 0xa4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa8*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xa9*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xac*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb3*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xb8*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xb9*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xba*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xbb*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xbc*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xbd*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xbe*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xbf*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2, +/*opcode 0xc0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc2*/ PTI_UIMM16_IMM_WIDTH_CONST_l2, +/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1, +/*opcode 0xc8*/ PTI_IMM_hasimm_map0x0_op0xc8_l1, +/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xca*/ PTI_UIMM16_IMM_WIDTH_CONST_l2, +/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcd*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xd5*/ PTI_SIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xe5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xe6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xe7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xea*/ PTI_UIMM16_IMM_WIDTH_CONST_l2, +/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf0*/ 0, +/*opcode 
0xf1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf2*/ 0, +/*opcode 0xf3*/ 0, +/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf6*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1, +/*opcode 0xf7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1, +/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xff*/ PTI_0_IMM_WIDTH_CONST_l2, +}; +static uint8_t imm_bytes_map_0x0F[256] = { +/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4*/ 0, +/*opcode 0x5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa*/ 0, +/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc*/ 0, +/*opcode 0xd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf*/ 0, +/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x14*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x15*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x24*/ 0, +/*opcode 0x25*/ 0, +/*opcode 0x26*/ 0, +/*opcode 0x27*/ 0, +/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x34*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x35*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x36*/ 0, +/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x38*/ 0, +/*opcode 0x39*/ 0, +/*opcode 0x3a*/ 0, +/*opcode 0x3b*/ 0, +/*opcode 0x3c*/ 0, +/*opcode 0x3d*/ 0, +/*opcode 0x3e*/ 0, +/*opcode 0x3f*/ 0, +/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4c*/ 
PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x64*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x65*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x66*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x67*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x68*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x69*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x70*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x71*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x72*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x73*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x78*/ PTI_IMM_hasimm_map0x0F_op0x78_l1, +/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x80*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x81*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x82*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x83*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9a*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9e*/ 
PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa6*/ 0, +/*opcode 0xa7*/ 0, +/*opcode 0xa8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xa9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xac*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xb9*/ 0, +/*opcode 0xba*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xbb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xbc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xbd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xbe*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xbf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2, +/*opcode 0xc7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xca*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xea*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf0*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf1*/ PTI_0_IMM_WIDTH_CONST_l2, 
+/*opcode 0xf2*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf3*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf6*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf7*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2, +/*opcode 0xff*/ 0, +}; diff --git a/libipt/internal/include/pti-modrm-defs.h b/libipt/internal/include/pti-modrm-defs.h new file mode 100644 index 0000000..6faa045 --- /dev/null +++ b/libipt/internal/include/pti-modrm-defs.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if !defined(PTI_MODRM_DEFS_H) +#define PTI_MODRM_DEFS_H + + +#define PTI_MODRM_FALSE 0 +#define PTI_MODRM_TRUE 1 +#define PTI_MODRM_IGNORE_MOD 2 +#define PTI_MODRM_UNDEF 3 + +#endif diff --git a/libipt/internal/include/pti-modrm.h b/libipt/internal/include/pti-modrm.h new file mode 100644 index 0000000..a2d5498 --- /dev/null +++ b/libipt/internal/include/pti-modrm.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +static uint8_t has_modrm_map_0x0[256] = { +/*opcode 0x0*/ PTI_MODRM_TRUE, +/*opcode 0x1*/ PTI_MODRM_TRUE, +/*opcode 0x2*/ PTI_MODRM_TRUE, +/*opcode 0x3*/ PTI_MODRM_TRUE, +/*opcode 0x4*/ PTI_MODRM_FALSE, +/*opcode 0x5*/ PTI_MODRM_FALSE, +/*opcode 0x6*/ PTI_MODRM_FALSE, +/*opcode 0x7*/ PTI_MODRM_FALSE, +/*opcode 0x8*/ PTI_MODRM_TRUE, +/*opcode 0x9*/ PTI_MODRM_TRUE, +/*opcode 0xa*/ PTI_MODRM_TRUE, +/*opcode 0xb*/ PTI_MODRM_TRUE, +/*opcode 0xc*/ PTI_MODRM_FALSE, +/*opcode 0xd*/ PTI_MODRM_FALSE, +/*opcode 0xe*/ PTI_MODRM_FALSE, +/*opcode 0xf*/ PTI_MODRM_UNDEF, +/*opcode 0x10*/ PTI_MODRM_TRUE, +/*opcode 0x11*/ PTI_MODRM_TRUE, +/*opcode 0x12*/ PTI_MODRM_TRUE, +/*opcode 0x13*/ PTI_MODRM_TRUE, +/*opcode 0x14*/ PTI_MODRM_FALSE, +/*opcode 0x15*/ PTI_MODRM_FALSE, +/*opcode 0x16*/ PTI_MODRM_FALSE, +/*opcode 0x17*/ PTI_MODRM_FALSE, +/*opcode 0x18*/ PTI_MODRM_TRUE, +/*opcode 0x19*/ PTI_MODRM_TRUE, +/*opcode 0x1a*/ PTI_MODRM_TRUE, +/*opcode 0x1b*/ PTI_MODRM_TRUE, +/*opcode 0x1c*/ PTI_MODRM_FALSE, +/*opcode 0x1d*/ PTI_MODRM_FALSE, +/*opcode 0x1e*/ PTI_MODRM_FALSE, +/*opcode 0x1f*/ PTI_MODRM_FALSE, +/*opcode 0x20*/ PTI_MODRM_TRUE, +/*opcode 0x21*/ PTI_MODRM_TRUE, +/*opcode 0x22*/ PTI_MODRM_TRUE, +/*opcode 0x23*/ PTI_MODRM_TRUE, +/*opcode 0x24*/ PTI_MODRM_FALSE, +/*opcode 0x25*/ PTI_MODRM_FALSE, +/*opcode 0x26*/ PTI_MODRM_UNDEF, +/*opcode 0x27*/ PTI_MODRM_FALSE, +/*opcode 0x28*/ PTI_MODRM_TRUE, +/*opcode 0x29*/ PTI_MODRM_TRUE, +/*opcode 0x2a*/ PTI_MODRM_TRUE, +/*opcode 0x2b*/ PTI_MODRM_TRUE, +/*opcode 0x2c*/ PTI_MODRM_FALSE, +/*opcode 0x2d*/ PTI_MODRM_FALSE, +/*opcode 0x2e*/ PTI_MODRM_UNDEF, +/*opcode 0x2f*/ PTI_MODRM_FALSE, +/*opcode 0x30*/ PTI_MODRM_TRUE, +/*opcode 0x31*/ PTI_MODRM_TRUE, +/*opcode 0x32*/ PTI_MODRM_TRUE, +/*opcode 0x33*/ PTI_MODRM_TRUE, +/*opcode 0x34*/ PTI_MODRM_FALSE, +/*opcode 0x35*/ PTI_MODRM_FALSE, +/*opcode 0x36*/ PTI_MODRM_UNDEF, +/*opcode 0x37*/ PTI_MODRM_FALSE, +/*opcode 0x38*/ PTI_MODRM_TRUE, +/*opcode 0x39*/ PTI_MODRM_TRUE, +/*opcode 0x3a*/ PTI_MODRM_TRUE, +/*opcode 0x3b*/ PTI_MODRM_TRUE, +/*opcode 0x3c*/ PTI_MODRM_FALSE, +/*opcode 0x3d*/ PTI_MODRM_FALSE, +/*opcode 0x3e*/ PTI_MODRM_UNDEF, +/*opcode 0x3f*/ PTI_MODRM_FALSE, +/*opcode 0x40*/ PTI_MODRM_FALSE, +/*opcode 0x41*/ PTI_MODRM_FALSE, +/*opcode 0x42*/ PTI_MODRM_FALSE, +/*opcode 0x43*/ PTI_MODRM_FALSE, +/*opcode 0x44*/ PTI_MODRM_FALSE, +/*opcode 0x45*/ PTI_MODRM_FALSE, +/*opcode 0x46*/ PTI_MODRM_FALSE, +/*opcode 0x47*/ PTI_MODRM_FALSE, +/*opcode 0x48*/ PTI_MODRM_FALSE, +/*opcode 0x49*/ PTI_MODRM_FALSE, +/*opcode 0x4a*/ PTI_MODRM_FALSE, +/*opcode 0x4b*/ PTI_MODRM_FALSE, +/*opcode 0x4c*/ PTI_MODRM_FALSE, +/*opcode 0x4d*/ PTI_MODRM_FALSE, +/*opcode 0x4e*/ PTI_MODRM_FALSE, +/*opcode 0x4f*/ PTI_MODRM_FALSE, +/*opcode 0x50*/ 
PTI_MODRM_FALSE, +/*opcode 0x51*/ PTI_MODRM_FALSE, +/*opcode 0x52*/ PTI_MODRM_FALSE, +/*opcode 0x53*/ PTI_MODRM_FALSE, +/*opcode 0x54*/ PTI_MODRM_FALSE, +/*opcode 0x55*/ PTI_MODRM_FALSE, +/*opcode 0x56*/ PTI_MODRM_FALSE, +/*opcode 0x57*/ PTI_MODRM_FALSE, +/*opcode 0x58*/ PTI_MODRM_FALSE, +/*opcode 0x59*/ PTI_MODRM_FALSE, +/*opcode 0x5a*/ PTI_MODRM_FALSE, +/*opcode 0x5b*/ PTI_MODRM_FALSE, +/*opcode 0x5c*/ PTI_MODRM_FALSE, +/*opcode 0x5d*/ PTI_MODRM_FALSE, +/*opcode 0x5e*/ PTI_MODRM_FALSE, +/*opcode 0x5f*/ PTI_MODRM_FALSE, +/*opcode 0x60*/ PTI_MODRM_FALSE, +/*opcode 0x61*/ PTI_MODRM_FALSE, +/*opcode 0x62*/ PTI_MODRM_TRUE, +/*opcode 0x63*/ PTI_MODRM_TRUE, +/*opcode 0x64*/ PTI_MODRM_UNDEF, +/*opcode 0x65*/ PTI_MODRM_UNDEF, +/*opcode 0x66*/ PTI_MODRM_UNDEF, +/*opcode 0x67*/ PTI_MODRM_UNDEF, +/*opcode 0x68*/ PTI_MODRM_FALSE, +/*opcode 0x69*/ PTI_MODRM_TRUE, +/*opcode 0x6a*/ PTI_MODRM_FALSE, +/*opcode 0x6b*/ PTI_MODRM_TRUE, +/*opcode 0x6c*/ PTI_MODRM_FALSE, +/*opcode 0x6d*/ PTI_MODRM_FALSE, +/*opcode 0x6e*/ PTI_MODRM_FALSE, +/*opcode 0x6f*/ PTI_MODRM_FALSE, +/*opcode 0x70*/ PTI_MODRM_FALSE, +/*opcode 0x71*/ PTI_MODRM_FALSE, +/*opcode 0x72*/ PTI_MODRM_FALSE, +/*opcode 0x73*/ PTI_MODRM_FALSE, +/*opcode 0x74*/ PTI_MODRM_FALSE, +/*opcode 0x75*/ PTI_MODRM_FALSE, +/*opcode 0x76*/ PTI_MODRM_FALSE, +/*opcode 0x77*/ PTI_MODRM_FALSE, +/*opcode 0x78*/ PTI_MODRM_FALSE, +/*opcode 0x79*/ PTI_MODRM_FALSE, +/*opcode 0x7a*/ PTI_MODRM_FALSE, +/*opcode 0x7b*/ PTI_MODRM_FALSE, +/*opcode 0x7c*/ PTI_MODRM_FALSE, +/*opcode 0x7d*/ PTI_MODRM_FALSE, +/*opcode 0x7e*/ PTI_MODRM_FALSE, +/*opcode 0x7f*/ PTI_MODRM_FALSE, +/*opcode 0x80*/ PTI_MODRM_TRUE, +/*opcode 0x81*/ PTI_MODRM_TRUE, +/*opcode 0x82*/ PTI_MODRM_TRUE, +/*opcode 0x83*/ PTI_MODRM_TRUE, +/*opcode 0x84*/ PTI_MODRM_TRUE, +/*opcode 0x85*/ PTI_MODRM_TRUE, +/*opcode 0x86*/ PTI_MODRM_TRUE, +/*opcode 0x87*/ PTI_MODRM_TRUE, +/*opcode 0x88*/ PTI_MODRM_TRUE, +/*opcode 0x89*/ PTI_MODRM_TRUE, +/*opcode 0x8a*/ PTI_MODRM_TRUE, +/*opcode 0x8b*/ PTI_MODRM_TRUE, +/*opcode 0x8c*/ PTI_MODRM_TRUE, +/*opcode 0x8d*/ PTI_MODRM_TRUE, +/*opcode 0x8e*/ PTI_MODRM_TRUE, +/*opcode 0x8f*/ PTI_MODRM_TRUE, +/*opcode 0x90*/ PTI_MODRM_FALSE, +/*opcode 0x91*/ PTI_MODRM_FALSE, +/*opcode 0x92*/ PTI_MODRM_FALSE, +/*opcode 0x93*/ PTI_MODRM_FALSE, +/*opcode 0x94*/ PTI_MODRM_FALSE, +/*opcode 0x95*/ PTI_MODRM_FALSE, +/*opcode 0x96*/ PTI_MODRM_FALSE, +/*opcode 0x97*/ PTI_MODRM_FALSE, +/*opcode 0x98*/ PTI_MODRM_FALSE, +/*opcode 0x99*/ PTI_MODRM_FALSE, +/*opcode 0x9a*/ PTI_MODRM_FALSE, +/*opcode 0x9b*/ PTI_MODRM_FALSE, +/*opcode 0x9c*/ PTI_MODRM_FALSE, +/*opcode 0x9d*/ PTI_MODRM_FALSE, +/*opcode 0x9e*/ PTI_MODRM_FALSE, +/*opcode 0x9f*/ PTI_MODRM_FALSE, +/*opcode 0xa0*/ PTI_MODRM_FALSE, +/*opcode 0xa1*/ PTI_MODRM_FALSE, +/*opcode 0xa2*/ PTI_MODRM_FALSE, +/*opcode 0xa3*/ PTI_MODRM_FALSE, +/*opcode 0xa4*/ PTI_MODRM_FALSE, +/*opcode 0xa5*/ PTI_MODRM_FALSE, +/*opcode 0xa6*/ PTI_MODRM_FALSE, +/*opcode 0xa7*/ PTI_MODRM_FALSE, +/*opcode 0xa8*/ PTI_MODRM_FALSE, +/*opcode 0xa9*/ PTI_MODRM_FALSE, +/*opcode 0xaa*/ PTI_MODRM_FALSE, +/*opcode 0xab*/ PTI_MODRM_FALSE, +/*opcode 0xac*/ PTI_MODRM_FALSE, +/*opcode 0xad*/ PTI_MODRM_FALSE, +/*opcode 0xae*/ PTI_MODRM_FALSE, +/*opcode 0xaf*/ PTI_MODRM_FALSE, +/*opcode 0xb0*/ PTI_MODRM_FALSE, +/*opcode 0xb1*/ PTI_MODRM_FALSE, +/*opcode 0xb2*/ PTI_MODRM_FALSE, +/*opcode 0xb3*/ PTI_MODRM_FALSE, +/*opcode 0xb4*/ PTI_MODRM_FALSE, +/*opcode 0xb5*/ PTI_MODRM_FALSE, +/*opcode 0xb6*/ PTI_MODRM_FALSE, +/*opcode 0xb7*/ PTI_MODRM_FALSE, +/*opcode 0xb8*/ PTI_MODRM_FALSE, +/*opcode 0xb9*/ 
PTI_MODRM_FALSE, +/*opcode 0xba*/ PTI_MODRM_FALSE, +/*opcode 0xbb*/ PTI_MODRM_FALSE, +/*opcode 0xbc*/ PTI_MODRM_FALSE, +/*opcode 0xbd*/ PTI_MODRM_FALSE, +/*opcode 0xbe*/ PTI_MODRM_FALSE, +/*opcode 0xbf*/ PTI_MODRM_FALSE, +/*opcode 0xc0*/ PTI_MODRM_TRUE, +/*opcode 0xc1*/ PTI_MODRM_TRUE, +/*opcode 0xc2*/ PTI_MODRM_FALSE, +/*opcode 0xc3*/ PTI_MODRM_FALSE, +/*opcode 0xc4*/ PTI_MODRM_TRUE, +/*opcode 0xc5*/ PTI_MODRM_TRUE, +/*opcode 0xc6*/ PTI_MODRM_TRUE, +/*opcode 0xc7*/ PTI_MODRM_TRUE, +/*opcode 0xc8*/ PTI_MODRM_FALSE, +/*opcode 0xc9*/ PTI_MODRM_FALSE, +/*opcode 0xca*/ PTI_MODRM_FALSE, +/*opcode 0xcb*/ PTI_MODRM_FALSE, +/*opcode 0xcc*/ PTI_MODRM_FALSE, +/*opcode 0xcd*/ PTI_MODRM_FALSE, +/*opcode 0xce*/ PTI_MODRM_FALSE, +/*opcode 0xcf*/ PTI_MODRM_FALSE, +/*opcode 0xd0*/ PTI_MODRM_TRUE, +/*opcode 0xd1*/ PTI_MODRM_TRUE, +/*opcode 0xd2*/ PTI_MODRM_TRUE, +/*opcode 0xd3*/ PTI_MODRM_TRUE, +/*opcode 0xd4*/ PTI_MODRM_FALSE, +/*opcode 0xd5*/ PTI_MODRM_FALSE, +/*opcode 0xd6*/ PTI_MODRM_FALSE, +/*opcode 0xd7*/ PTI_MODRM_FALSE, +/*opcode 0xd8*/ PTI_MODRM_TRUE, +/*opcode 0xd9*/ PTI_MODRM_TRUE, +/*opcode 0xda*/ PTI_MODRM_TRUE, +/*opcode 0xdb*/ PTI_MODRM_TRUE, +/*opcode 0xdc*/ PTI_MODRM_TRUE, +/*opcode 0xdd*/ PTI_MODRM_TRUE, +/*opcode 0xde*/ PTI_MODRM_TRUE, +/*opcode 0xdf*/ PTI_MODRM_TRUE, +/*opcode 0xe0*/ PTI_MODRM_FALSE, +/*opcode 0xe1*/ PTI_MODRM_FALSE, +/*opcode 0xe2*/ PTI_MODRM_FALSE, +/*opcode 0xe3*/ PTI_MODRM_FALSE, +/*opcode 0xe4*/ PTI_MODRM_FALSE, +/*opcode 0xe5*/ PTI_MODRM_FALSE, +/*opcode 0xe6*/ PTI_MODRM_FALSE, +/*opcode 0xe7*/ PTI_MODRM_FALSE, +/*opcode 0xe8*/ PTI_MODRM_FALSE, +/*opcode 0xe9*/ PTI_MODRM_FALSE, +/*opcode 0xea*/ PTI_MODRM_FALSE, +/*opcode 0xeb*/ PTI_MODRM_FALSE, +/*opcode 0xec*/ PTI_MODRM_FALSE, +/*opcode 0xed*/ PTI_MODRM_FALSE, +/*opcode 0xee*/ PTI_MODRM_FALSE, +/*opcode 0xef*/ PTI_MODRM_FALSE, +/*opcode 0xf0*/ PTI_MODRM_UNDEF, +/*opcode 0xf1*/ PTI_MODRM_FALSE, +/*opcode 0xf2*/ PTI_MODRM_UNDEF, +/*opcode 0xf3*/ PTI_MODRM_UNDEF, +/*opcode 0xf4*/ PTI_MODRM_FALSE, +/*opcode 0xf5*/ PTI_MODRM_FALSE, +/*opcode 0xf6*/ PTI_MODRM_TRUE, +/*opcode 0xf7*/ PTI_MODRM_TRUE, +/*opcode 0xf8*/ PTI_MODRM_FALSE, +/*opcode 0xf9*/ PTI_MODRM_FALSE, +/*opcode 0xfa*/ PTI_MODRM_FALSE, +/*opcode 0xfb*/ PTI_MODRM_FALSE, +/*opcode 0xfc*/ PTI_MODRM_FALSE, +/*opcode 0xfd*/ PTI_MODRM_FALSE, +/*opcode 0xfe*/ PTI_MODRM_TRUE, +/*opcode 0xff*/ PTI_MODRM_TRUE, +}; +static uint8_t has_modrm_map_0x0F[256] = { +/*opcode 0x0*/ PTI_MODRM_TRUE, +/*opcode 0x1*/ PTI_MODRM_TRUE, +/*opcode 0x2*/ PTI_MODRM_TRUE, +/*opcode 0x3*/ PTI_MODRM_TRUE, +/*opcode 0x4*/ PTI_MODRM_UNDEF, +/*opcode 0x5*/ PTI_MODRM_FALSE, +/*opcode 0x6*/ PTI_MODRM_FALSE, +/*opcode 0x7*/ PTI_MODRM_FALSE, +/*opcode 0x8*/ PTI_MODRM_FALSE, +/*opcode 0x9*/ PTI_MODRM_FALSE, +/*opcode 0xa*/ PTI_MODRM_UNDEF, +/*opcode 0xb*/ PTI_MODRM_FALSE, +/*opcode 0xc*/ PTI_MODRM_UNDEF, +/*opcode 0xd*/ PTI_MODRM_TRUE, +/*opcode 0xe*/ PTI_MODRM_FALSE, +/*opcode 0xf*/ PTI_MODRM_UNDEF, +/*opcode 0x10*/ PTI_MODRM_TRUE, +/*opcode 0x11*/ PTI_MODRM_TRUE, +/*opcode 0x12*/ PTI_MODRM_TRUE, +/*opcode 0x13*/ PTI_MODRM_TRUE, +/*opcode 0x14*/ PTI_MODRM_TRUE, +/*opcode 0x15*/ PTI_MODRM_TRUE, +/*opcode 0x16*/ PTI_MODRM_TRUE, +/*opcode 0x17*/ PTI_MODRM_TRUE, +/*opcode 0x18*/ PTI_MODRM_TRUE, +/*opcode 0x19*/ PTI_MODRM_TRUE, +/*opcode 0x1a*/ PTI_MODRM_TRUE, +/*opcode 0x1b*/ PTI_MODRM_TRUE, +/*opcode 0x1c*/ PTI_MODRM_TRUE, +/*opcode 0x1d*/ PTI_MODRM_TRUE, +/*opcode 0x1e*/ PTI_MODRM_TRUE, +/*opcode 0x1f*/ PTI_MODRM_TRUE, +/*opcode 0x20*/ PTI_MODRM_IGNORE_MOD, +/*opcode 0x21*/ PTI_MODRM_IGNORE_MOD, 
+/*opcode 0x22*/ PTI_MODRM_IGNORE_MOD, +/*opcode 0x23*/ PTI_MODRM_IGNORE_MOD, +/*opcode 0x24*/ PTI_MODRM_UNDEF, +/*opcode 0x25*/ PTI_MODRM_UNDEF, +/*opcode 0x26*/ PTI_MODRM_UNDEF, +/*opcode 0x27*/ PTI_MODRM_UNDEF, +/*opcode 0x28*/ PTI_MODRM_TRUE, +/*opcode 0x29*/ PTI_MODRM_TRUE, +/*opcode 0x2a*/ PTI_MODRM_TRUE, +/*opcode 0x2b*/ PTI_MODRM_TRUE, +/*opcode 0x2c*/ PTI_MODRM_TRUE, +/*opcode 0x2d*/ PTI_MODRM_TRUE, +/*opcode 0x2e*/ PTI_MODRM_TRUE, +/*opcode 0x2f*/ PTI_MODRM_TRUE, +/*opcode 0x30*/ PTI_MODRM_FALSE, +/*opcode 0x31*/ PTI_MODRM_FALSE, +/*opcode 0x32*/ PTI_MODRM_FALSE, +/*opcode 0x33*/ PTI_MODRM_FALSE, +/*opcode 0x34*/ PTI_MODRM_FALSE, +/*opcode 0x35*/ PTI_MODRM_FALSE, +/*opcode 0x36*/ PTI_MODRM_UNDEF, +/*opcode 0x37*/ PTI_MODRM_FALSE, +/*opcode 0x38*/ PTI_MODRM_UNDEF, +/*opcode 0x39*/ PTI_MODRM_UNDEF, +/*opcode 0x3a*/ PTI_MODRM_UNDEF, +/*opcode 0x3b*/ PTI_MODRM_UNDEF, +/*opcode 0x3c*/ PTI_MODRM_UNDEF, +/*opcode 0x3d*/ PTI_MODRM_UNDEF, +/*opcode 0x3e*/ PTI_MODRM_UNDEF, +/*opcode 0x3f*/ PTI_MODRM_UNDEF, +/*opcode 0x40*/ PTI_MODRM_TRUE, +/*opcode 0x41*/ PTI_MODRM_TRUE, +/*opcode 0x42*/ PTI_MODRM_TRUE, +/*opcode 0x43*/ PTI_MODRM_TRUE, +/*opcode 0x44*/ PTI_MODRM_TRUE, +/*opcode 0x45*/ PTI_MODRM_TRUE, +/*opcode 0x46*/ PTI_MODRM_TRUE, +/*opcode 0x47*/ PTI_MODRM_TRUE, +/*opcode 0x48*/ PTI_MODRM_TRUE, +/*opcode 0x49*/ PTI_MODRM_TRUE, +/*opcode 0x4a*/ PTI_MODRM_TRUE, +/*opcode 0x4b*/ PTI_MODRM_TRUE, +/*opcode 0x4c*/ PTI_MODRM_TRUE, +/*opcode 0x4d*/ PTI_MODRM_TRUE, +/*opcode 0x4e*/ PTI_MODRM_TRUE, +/*opcode 0x4f*/ PTI_MODRM_TRUE, +/*opcode 0x50*/ PTI_MODRM_TRUE, +/*opcode 0x51*/ PTI_MODRM_TRUE, +/*opcode 0x52*/ PTI_MODRM_TRUE, +/*opcode 0x53*/ PTI_MODRM_TRUE, +/*opcode 0x54*/ PTI_MODRM_TRUE, +/*opcode 0x55*/ PTI_MODRM_TRUE, +/*opcode 0x56*/ PTI_MODRM_TRUE, +/*opcode 0x57*/ PTI_MODRM_TRUE, +/*opcode 0x58*/ PTI_MODRM_TRUE, +/*opcode 0x59*/ PTI_MODRM_TRUE, +/*opcode 0x5a*/ PTI_MODRM_TRUE, +/*opcode 0x5b*/ PTI_MODRM_TRUE, +/*opcode 0x5c*/ PTI_MODRM_TRUE, +/*opcode 0x5d*/ PTI_MODRM_TRUE, +/*opcode 0x5e*/ PTI_MODRM_TRUE, +/*opcode 0x5f*/ PTI_MODRM_TRUE, +/*opcode 0x60*/ PTI_MODRM_TRUE, +/*opcode 0x61*/ PTI_MODRM_TRUE, +/*opcode 0x62*/ PTI_MODRM_TRUE, +/*opcode 0x63*/ PTI_MODRM_TRUE, +/*opcode 0x64*/ PTI_MODRM_TRUE, +/*opcode 0x65*/ PTI_MODRM_TRUE, +/*opcode 0x66*/ PTI_MODRM_TRUE, +/*opcode 0x67*/ PTI_MODRM_TRUE, +/*opcode 0x68*/ PTI_MODRM_TRUE, +/*opcode 0x69*/ PTI_MODRM_TRUE, +/*opcode 0x6a*/ PTI_MODRM_TRUE, +/*opcode 0x6b*/ PTI_MODRM_TRUE, +/*opcode 0x6c*/ PTI_MODRM_TRUE, +/*opcode 0x6d*/ PTI_MODRM_TRUE, +/*opcode 0x6e*/ PTI_MODRM_TRUE, +/*opcode 0x6f*/ PTI_MODRM_TRUE, +/*opcode 0x70*/ PTI_MODRM_TRUE, +/*opcode 0x71*/ PTI_MODRM_TRUE, +/*opcode 0x72*/ PTI_MODRM_TRUE, +/*opcode 0x73*/ PTI_MODRM_TRUE, +/*opcode 0x74*/ PTI_MODRM_TRUE, +/*opcode 0x75*/ PTI_MODRM_TRUE, +/*opcode 0x76*/ PTI_MODRM_TRUE, +/*opcode 0x77*/ PTI_MODRM_FALSE, +/*opcode 0x78*/ PTI_MODRM_TRUE, +/*opcode 0x79*/ PTI_MODRM_TRUE, +/*opcode 0x7a*/ PTI_MODRM_TRUE, +/*opcode 0x7b*/ PTI_MODRM_TRUE, +/*opcode 0x7c*/ PTI_MODRM_TRUE, +/*opcode 0x7d*/ PTI_MODRM_TRUE, +/*opcode 0x7e*/ PTI_MODRM_TRUE, +/*opcode 0x7f*/ PTI_MODRM_TRUE, +/*opcode 0x80*/ PTI_MODRM_FALSE, +/*opcode 0x81*/ PTI_MODRM_FALSE, +/*opcode 0x82*/ PTI_MODRM_FALSE, +/*opcode 0x83*/ PTI_MODRM_FALSE, +/*opcode 0x84*/ PTI_MODRM_FALSE, +/*opcode 0x85*/ PTI_MODRM_FALSE, +/*opcode 0x86*/ PTI_MODRM_FALSE, +/*opcode 0x87*/ PTI_MODRM_FALSE, +/*opcode 0x88*/ PTI_MODRM_FALSE, +/*opcode 0x89*/ PTI_MODRM_FALSE, +/*opcode 0x8a*/ PTI_MODRM_FALSE, +/*opcode 0x8b*/ PTI_MODRM_FALSE, +/*opcode 
0x8c*/ PTI_MODRM_FALSE, +/*opcode 0x8d*/ PTI_MODRM_FALSE, +/*opcode 0x8e*/ PTI_MODRM_FALSE, +/*opcode 0x8f*/ PTI_MODRM_FALSE, +/*opcode 0x90*/ PTI_MODRM_TRUE, +/*opcode 0x91*/ PTI_MODRM_TRUE, +/*opcode 0x92*/ PTI_MODRM_TRUE, +/*opcode 0x93*/ PTI_MODRM_TRUE, +/*opcode 0x94*/ PTI_MODRM_TRUE, +/*opcode 0x95*/ PTI_MODRM_TRUE, +/*opcode 0x96*/ PTI_MODRM_TRUE, +/*opcode 0x97*/ PTI_MODRM_TRUE, +/*opcode 0x98*/ PTI_MODRM_TRUE, +/*opcode 0x99*/ PTI_MODRM_TRUE, +/*opcode 0x9a*/ PTI_MODRM_TRUE, +/*opcode 0x9b*/ PTI_MODRM_TRUE, +/*opcode 0x9c*/ PTI_MODRM_TRUE, +/*opcode 0x9d*/ PTI_MODRM_TRUE, +/*opcode 0x9e*/ PTI_MODRM_TRUE, +/*opcode 0x9f*/ PTI_MODRM_TRUE, +/*opcode 0xa0*/ PTI_MODRM_FALSE, +/*opcode 0xa1*/ PTI_MODRM_FALSE, +/*opcode 0xa2*/ PTI_MODRM_FALSE, +/*opcode 0xa3*/ PTI_MODRM_TRUE, +/*opcode 0xa4*/ PTI_MODRM_TRUE, +/*opcode 0xa5*/ PTI_MODRM_TRUE, +/*opcode 0xa6*/ PTI_MODRM_UNDEF, +/*opcode 0xa7*/ PTI_MODRM_UNDEF, +/*opcode 0xa8*/ PTI_MODRM_FALSE, +/*opcode 0xa9*/ PTI_MODRM_FALSE, +/*opcode 0xaa*/ PTI_MODRM_FALSE, +/*opcode 0xab*/ PTI_MODRM_TRUE, +/*opcode 0xac*/ PTI_MODRM_TRUE, +/*opcode 0xad*/ PTI_MODRM_TRUE, +/*opcode 0xae*/ PTI_MODRM_TRUE, +/*opcode 0xaf*/ PTI_MODRM_TRUE, +/*opcode 0xb0*/ PTI_MODRM_TRUE, +/*opcode 0xb1*/ PTI_MODRM_TRUE, +/*opcode 0xb2*/ PTI_MODRM_TRUE, +/*opcode 0xb3*/ PTI_MODRM_TRUE, +/*opcode 0xb4*/ PTI_MODRM_TRUE, +/*opcode 0xb5*/ PTI_MODRM_TRUE, +/*opcode 0xb6*/ PTI_MODRM_TRUE, +/*opcode 0xb7*/ PTI_MODRM_TRUE, +/*opcode 0xb8*/ PTI_MODRM_TRUE, +/*opcode 0xb9*/ PTI_MODRM_UNDEF, +/*opcode 0xba*/ PTI_MODRM_TRUE, +/*opcode 0xbb*/ PTI_MODRM_TRUE, +/*opcode 0xbc*/ PTI_MODRM_TRUE, +/*opcode 0xbd*/ PTI_MODRM_TRUE, +/*opcode 0xbe*/ PTI_MODRM_TRUE, +/*opcode 0xbf*/ PTI_MODRM_TRUE, +/*opcode 0xc0*/ PTI_MODRM_TRUE, +/*opcode 0xc1*/ PTI_MODRM_TRUE, +/*opcode 0xc2*/ PTI_MODRM_TRUE, +/*opcode 0xc3*/ PTI_MODRM_TRUE, +/*opcode 0xc4*/ PTI_MODRM_TRUE, +/*opcode 0xc5*/ PTI_MODRM_TRUE, +/*opcode 0xc6*/ PTI_MODRM_TRUE, +/*opcode 0xc7*/ PTI_MODRM_TRUE, +/*opcode 0xc8*/ PTI_MODRM_FALSE, +/*opcode 0xc9*/ PTI_MODRM_FALSE, +/*opcode 0xca*/ PTI_MODRM_FALSE, +/*opcode 0xcb*/ PTI_MODRM_FALSE, +/*opcode 0xcc*/ PTI_MODRM_FALSE, +/*opcode 0xcd*/ PTI_MODRM_FALSE, +/*opcode 0xce*/ PTI_MODRM_FALSE, +/*opcode 0xcf*/ PTI_MODRM_FALSE, +/*opcode 0xd0*/ PTI_MODRM_TRUE, +/*opcode 0xd1*/ PTI_MODRM_TRUE, +/*opcode 0xd2*/ PTI_MODRM_TRUE, +/*opcode 0xd3*/ PTI_MODRM_TRUE, +/*opcode 0xd4*/ PTI_MODRM_TRUE, +/*opcode 0xd5*/ PTI_MODRM_TRUE, +/*opcode 0xd6*/ PTI_MODRM_TRUE, +/*opcode 0xd7*/ PTI_MODRM_TRUE, +/*opcode 0xd8*/ PTI_MODRM_TRUE, +/*opcode 0xd9*/ PTI_MODRM_TRUE, +/*opcode 0xda*/ PTI_MODRM_TRUE, +/*opcode 0xdb*/ PTI_MODRM_TRUE, +/*opcode 0xdc*/ PTI_MODRM_TRUE, +/*opcode 0xdd*/ PTI_MODRM_TRUE, +/*opcode 0xde*/ PTI_MODRM_TRUE, +/*opcode 0xdf*/ PTI_MODRM_TRUE, +/*opcode 0xe0*/ PTI_MODRM_TRUE, +/*opcode 0xe1*/ PTI_MODRM_TRUE, +/*opcode 0xe2*/ PTI_MODRM_TRUE, +/*opcode 0xe3*/ PTI_MODRM_TRUE, +/*opcode 0xe4*/ PTI_MODRM_TRUE, +/*opcode 0xe5*/ PTI_MODRM_TRUE, +/*opcode 0xe6*/ PTI_MODRM_TRUE, +/*opcode 0xe7*/ PTI_MODRM_TRUE, +/*opcode 0xe8*/ PTI_MODRM_TRUE, +/*opcode 0xe9*/ PTI_MODRM_TRUE, +/*opcode 0xea*/ PTI_MODRM_TRUE, +/*opcode 0xeb*/ PTI_MODRM_TRUE, +/*opcode 0xec*/ PTI_MODRM_TRUE, +/*opcode 0xed*/ PTI_MODRM_TRUE, +/*opcode 0xee*/ PTI_MODRM_TRUE, +/*opcode 0xef*/ PTI_MODRM_TRUE, +/*opcode 0xf0*/ PTI_MODRM_TRUE, +/*opcode 0xf1*/ PTI_MODRM_TRUE, +/*opcode 0xf2*/ PTI_MODRM_TRUE, +/*opcode 0xf3*/ PTI_MODRM_TRUE, +/*opcode 0xf4*/ PTI_MODRM_TRUE, +/*opcode 0xf5*/ PTI_MODRM_TRUE, +/*opcode 0xf6*/ PTI_MODRM_TRUE, +/*opcode 
0xf7*/ PTI_MODRM_TRUE, +/*opcode 0xf8*/ PTI_MODRM_TRUE, +/*opcode 0xf9*/ PTI_MODRM_TRUE, +/*opcode 0xfa*/ PTI_MODRM_TRUE, +/*opcode 0xfb*/ PTI_MODRM_TRUE, +/*opcode 0xfc*/ PTI_MODRM_TRUE, +/*opcode 0xfd*/ PTI_MODRM_TRUE, +/*opcode 0xfe*/ PTI_MODRM_TRUE, +/*opcode 0xff*/ PTI_MODRM_UNDEF, +}; diff --git a/libipt/internal/include/windows/pt_section_windows.h b/libipt/internal/include/windows/pt_section_windows.h new file mode 100644 index 0000000..3cdaac9 --- /dev/null +++ b/libipt/internal/include/windows/pt_section_windows.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PT_SECTION_WINDOWS_H +#define PT_SECTION_WINDOWS_H + +#include <windows.h> +#include <sys/types.h> +#include <sys/stat.h> + +struct pt_section; + + +/* Fstat-based file status. */ +struct pt_sec_windows_status { + /* The file status. */ + struct _stat stat; +}; + +/* FileView-based section mapping information. */ +struct pt_sec_windows_mapping { + /* The file descriptor. */ + int fd; + + /* The FileMapping handle. */ + HANDLE mh; + + /* The mmap base address. */ + uint8_t *base; + + /* The begin and end of the mapped memory. */ + const uint8_t *begin, *end; +}; + + +/* Map a section. + * + * The caller has already opened the file for reading. + * + * On success, sets @section's mapping, unmap, and read pointers. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_invalid if @section can't be mapped. + */ +extern int pt_sec_windows_map(struct pt_section *section, int fd); + +/* Unmap a section. + * + * On success, clears @section's mapping, unmap, and read pointers. + * + * This function should not be called directly; call @section->unmap() instead. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @section is NULL. + * Returns -pte_internal if @section has not been mapped. + */ +extern int pt_sec_windows_unmap(struct pt_section *section); + +/* Read memory from an mmaped section.
+ * + * Reads at most @size bytes from @section at @offset into @buffer. + * + * This function should not be called directly; call @section->read() instead. + * + * Returns the number of bytes read on success, a negative error code otherwise. + * Returns -pte_invalid if @section or @buffer are NULL. + * Returns -pte_nomap if @offset is beyond the end of the section. + */ +extern int pt_sec_windows_read(const struct pt_section *section, + uint8_t *buffer, uint16_t size, + uint64_t offset); + +#endif /* PT_SECTION_WINDOWS_H */ diff --git a/libipt/src/posix/init.c b/libipt/src/posix/init.c new file mode 100644 index 0000000..00fffc1 --- /dev/null +++ b/libipt/src/posix/init.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_ild.h" + + +static void __attribute__((constructor)) init(void) +{ + /* Initialize the Intel(R) Processor Trace instruction decoder. */ + pt_ild_init(); +} diff --git a/libipt/src/posix/pt_cpuid.c b/libipt/src/posix/pt_cpuid.c new file mode 100644 index 0000000..a59ad9f --- /dev/null +++ b/libipt/src/posix/pt_cpuid.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_cpuid.h" + +#include <cpuid.h> + +extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + __get_cpuid(leaf, eax, ebx, ecx, edx); +} diff --git a/libipt/src/posix/pt_section_posix.c b/libipt/src/posix/pt_section_posix.c new file mode 100644 index 0000000..0415ed0 --- /dev/null +++ b/libipt/src/posix/pt_section_posix.c @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission.
+ */ + +#define _POSIX_C_SOURCE 1 +#define _DARWIN_C_SOURCE 1 + +#include "pt_section.h" +#include "pt_section_posix.h" +#include "pt_section_file.h" + +#include "intel-pt.h" + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <unistd.h> + + +int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename) +{ + struct pt_sec_posix_status *status; + struct stat buffer; + int errcode; + + if (!pstatus || !psize) + return -pte_internal; + + errcode = stat(filename, &buffer); + if (errcode < 0) + return errcode; + + if (buffer.st_size < 0) + return -pte_bad_image; + + status = malloc(sizeof(*status)); + if (!status) + return -pte_nomem; + + status->stat = buffer; + + *pstatus = status; + *psize = buffer.st_size; + + return 0; +} + +static int check_file_status(struct pt_section *section, int fd) +{ + struct pt_sec_posix_status *status; + struct stat stat; + int errcode; + + if (!section) + return -pte_internal; + + errcode = fstat(fd, &stat); + if (errcode) + return -pte_bad_image; + + status = section->status; + if (!status) + return -pte_internal; + + if (stat.st_size != status->stat.st_size) + return -pte_bad_image; + + if (stat.st_mtime != status->stat.st_mtime) + return -pte_bad_image; + + return 0; +} + +int pt_sec_posix_map(struct pt_section *section, int fd) +{ + struct pt_sec_posix_mapping *mapping; + uint64_t offset, size, adjustment; + uint8_t *base; + + if (!section) + return -pte_internal; + + offset = section->offset; + size = section->size; + + adjustment = offset % sysconf(_SC_PAGESIZE); + + offset -= adjustment; + size += adjustment; + + /* The section is supposed to fit into the file so we shouldn't + * see any overflows, here. + */ + if (size < section->size) + return -pte_internal; + + base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, offset); + if (base == MAP_FAILED) + return -pte_nomem; + + mapping = malloc(sizeof(*mapping)); + if (!mapping) + goto out_map; + + mapping->base = base; + mapping->size = size; + mapping->begin = base + adjustment; + mapping->end = base + size; + + section->mapping = mapping; + section->unmap = pt_sec_posix_unmap; + section->read = pt_sec_posix_read; + + return pt_section_add_bcache(section); + +out_map: + munmap(base, size); + return -pte_nomem; +} + +int pt_section_map(struct pt_section *section) +{ + const char *filename; + uint16_t mcount; + FILE *file; + int fd, errcode; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = section->mcount + 1; + if (mcount > 1) { + section->mcount = mcount; + return pt_section_unlock(section); + } + + errcode = -pte_internal; + if (!mcount) + goto out_unlock; + + if (section->mapping) + goto out_unlock; + + filename = section->filename; + if (!filename) + goto out_unlock; + + errcode = -pte_bad_image; + fd = open(filename, O_RDONLY); + if (fd == -1) + goto out_unlock; + + errcode = check_file_status(section, fd); + if (errcode < 0) + goto out_fd; + + /* We close the file on success. This does not unmap the section. */ + errcode = pt_sec_posix_map(section, fd); + if (!errcode) { + section->mcount = 1; + close(fd); + return pt_section_unlock(section); + } + + /* Fall back to file based sections - report the original error + * if we fail to convert the file descriptor. + */ + file = fdopen(fd, "rb"); + if (!file) + goto out_fd; + + /* We need to keep the file open on success. It will be closed when + * the section is unmapped.
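
pt_sec_posix_map() above must pass a page-aligned file offset to mmap(), so it rounds the requested offset down and remembers the remainder; reads then start @adjustment bytes into the mapping. A standalone sketch of that alignment step, with a hypothetical file name and offset that are not part of the library:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *filename = "trace.bin";	/* hypothetical input file */
	uint64_t offset = 0x1234, size = 0x100, adjustment;
	uint8_t *base;
	int fd;

	fd = open(filename, O_RDONLY);
	if (fd == -1)
		return 1;

	/* mmap() wants a page-aligned offset; keep the remainder around. */
	adjustment = offset % (uint64_t) sysconf(_SC_PAGESIZE);
	offset -= adjustment;
	size += adjustment;

	base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, (off_t) offset);
	close(fd);
	if (base == MAP_FAILED)
		return 1;

	/* The requested data starts @adjustment bytes into the mapping. */
	printf("first requested byte: 0x%02x\n", base[adjustment]);

	munmap(base, size);
	return 0;
}
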
+ */ + errcode = pt_sec_file_map(section, file); + if (!errcode) { + section->mcount = 1; + return pt_section_unlock(section); + } + + fclose(file); + goto out_unlock; + +out_fd: + close(fd); + +out_unlock: + (void) pt_section_unlock(section); + return errcode; +} + +int pt_sec_posix_unmap(struct pt_section *section) +{ + struct pt_sec_posix_mapping *mapping; + + if (!section) + return -pte_internal; + + mapping = section->mapping; + if (!mapping || !section->unmap || !section->read) + return -pte_internal; + + section->mapping = NULL; + section->unmap = NULL; + section->read = NULL; + + munmap(mapping->base, mapping->size); + free(mapping); + + return 0; +} + +int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + struct pt_sec_posix_mapping *mapping; + const uint8_t *begin; + + if (!buffer || !section) + return -pte_internal; + + mapping = section->mapping; + if (!mapping) + return -pte_internal; + + /* We already checked in pt_section_read() that the requested memory + * lies within the section's boundaries. + * + * And we checked that the entire section was mapped. There's no need + * to check for overflows, again. + */ + begin = mapping->begin + offset; + + memcpy(buffer, begin, size); + return (int) size; +} diff --git a/libipt/src/pt_asid.c b/libipt/src/pt_asid.c new file mode 100644 index 0000000..2899667 --- /dev/null +++ b/libipt/src/pt_asid.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_asid.h" + +#include "intel-pt.h" + +#include + + +int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user) +{ + if (!asid) + return -pte_internal; + + pt_asid_init(asid); + + if (user) { + size_t size; + + size = user->size; + + /* Ignore fields in the user's asid we don't know. */ + if (sizeof(*asid) < size) + size = sizeof(*asid); + + /* Copy (portions of) the user's asid. */ + memcpy(asid, user, size); + + /* We copied user's size - fix it. 
*/ + asid->size = sizeof(*asid); + } + + return 0; +} + +int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs) +{ + uint64_t lcr3, rcr3, lvmcs, rvmcs; + + if (!lhs || !rhs) + return -pte_internal; + + lcr3 = lhs->cr3; + rcr3 = rhs->cr3; + + if (lcr3 != rcr3 && lcr3 != pt_asid_no_cr3 && rcr3 != pt_asid_no_cr3) + return 0; + + lvmcs = lhs->vmcs; + rvmcs = rhs->vmcs; + + if (lvmcs != rvmcs && lvmcs != pt_asid_no_vmcs && + rvmcs != pt_asid_no_vmcs) + return 0; + + return 1; +} diff --git a/libipt/src/pt_block_cache.c b/libipt/src/pt_block_cache.c new file mode 100644 index 0000000..338678a --- /dev/null +++ b/libipt/src/pt_block_cache.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_block_cache.h" + +#include <stdlib.h> +#include <string.h> + + +struct pt_block_cache *pt_bcache_alloc(uint64_t nentries) +{ + struct pt_block_cache *bcache; + uint64_t size; + + if (!nentries || (UINT32_MAX < nentries)) + return NULL; + + size = sizeof(*bcache) + (nentries * sizeof(struct pt_bcache_entry)); + if (SIZE_MAX < size) + return NULL; + + bcache = malloc((size_t) size); + if (!bcache) + return NULL; + + memset(bcache, 0, (size_t) size); + bcache->nentries = (uint32_t) nentries; + + return bcache; +} + +void pt_bcache_free(struct pt_block_cache *bcache) +{ + free(bcache); +} + +int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index, + struct pt_bcache_entry bce) +{ + if (!bcache) + return -pte_internal; + + if (bcache->nentries <= index) + return -pte_internal; + + /* We rely on guaranteed atomic operations as specified in section 8.1.1 + * in Volume 3A of the Intel(R) Software Developer's Manual at + * http://www.intel.com/sdm.
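
pt_asid_from_user() above follows the usual size-prefixed struct pattern for forward and backward compatibility: start from defaults, copy at most sizeof(*asid) bytes of what the user passed in, then restore the library's own size field. A sketch of the same pattern with an illustrative struct; the type, its fields, and the memset-based defaulting are assumptions for this example (the library itself uses pt_asid_init() and the pt_asid_no_cr3/pt_asid_no_vmcs markers):

#include <stdint.h>
#include <string.h>

struct versioned_asid {
	size_t size;	/* set by the caller to sizeof(their struct) */
	uint64_t cr3;
	uint64_t vmcs;
};

void asid_from_user(struct versioned_asid *asid,
		    const struct versioned_asid *user)
{
	/* Defaults first, so fields the caller's version lacks stay sane. */
	memset(asid, 0, sizeof(*asid));
	asid->size = sizeof(*asid);
	asid->cr3 = UINT64_MAX;		/* "no cr3" marker */
	asid->vmcs = UINT64_MAX;	/* "no vmcs" marker */

	if (user) {
		size_t size = user->size;

		/* Copy only the part both sides know about. */
		if (sizeof(*asid) < size)
			size = sizeof(*asid);

		memcpy(asid, user, size);

		/* We copied the caller's size field - restore ours. */
		asid->size = sizeof(*asid);
	}
}
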
+ */ + bcache->entry[(uint32_t) index] = bce; + + return 0; +} + +int pt_bcache_lookup(struct pt_bcache_entry *bce, + const struct pt_block_cache *bcache, uint64_t index) +{ + if (!bce || !bcache) + return -pte_internal; + + if (bcache->nentries <= index) + return -pte_internal; + + /* We rely on guaranteed atomic operations as specified in section 8.1.1 + * in Volume 3A of the Intel(R) Software Developer's Manual at + * http://www.intel.com/sdm. + */ + *bce = bcache->entry[(uint32_t) index]; + + return 0; +} diff --git a/libipt/src/pt_block_decoder.c b/libipt/src/pt_block_decoder.c new file mode 100644 index 0000000..8dffbc2 --- /dev/null +++ b/libipt/src/pt_block_decoder.c @@ -0,0 +1,2961 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_block_decoder.h" +#include "pt_block_cache.h" +#include "pt_section.h" +#include "pt_image.h" +#include "pt_insn.h" +#include "pt_ild.h" +#include "pt_config.h" + +#include "intel-pt.h" + +#include <stdlib.h> +#include <string.h> + + +static int pt_blk_proceed_no_event(struct pt_block_decoder *, + struct pt_block *); + + +/* Release a cached section. + * + * If @scache does not contain a section, this does nothing. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal, if @scache is NULL. + */ +static int pt_blk_scache_invalidate(struct pt_cached_section *scache) +{ + struct pt_section *section; + int errcode; + + if (!scache) + return -pte_internal; + + section = scache->section; + if (!section) + return 0; + + errcode = pt_section_unmap(section); + if (errcode < 0) + return errcode; + + scache->section = NULL; + + return pt_section_put(section); +} + +/* Cache @section loaded at @laddr identified by @isid in @scache. + * + * The caller transfers its use- and map-count to @scache. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @scache or @section is NULL. + * Returns -pte_internal if another section is already cached.
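
pt_bcache_alloc(), pt_bcache_add(), and pt_bcache_lookup() above manage the block cache as a single allocation: a small header followed by a flexible array of entries indexed by section-relative IP offset. A self-contained sketch of that layout; the struct and entry fields are illustrative, not the library's:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ip_cache_entry {
	int32_t displacement;
	uint32_t ninsn;
};

struct ip_cache {
	uint32_t nentries;
	struct ip_cache_entry entry[];	/* flexible array member */
};

struct ip_cache *ip_cache_alloc(uint32_t nentries)
{
	struct ip_cache *cache;
	size_t size;

	if (!nentries)
		return NULL;

	/* One allocation covers the header and all entries. */
	size = sizeof(*cache) + ((size_t) nentries * sizeof(cache->entry[0]));

	cache = malloc(size);
	if (!cache)
		return NULL;

	memset(cache, 0, size);
	cache->nentries = nentries;

	return cache;
}

int ip_cache_lookup(struct ip_cache_entry *entry,
		    const struct ip_cache *cache, uint64_t offset)
{
	if (!entry || !cache || cache->nentries <= offset)
		return -1;

	/* Entries are indexed by section-relative instruction offset. */
	*entry = cache->entry[(uint32_t) offset];
	return 0;
}
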
+ */ +static int pt_blk_cache_section(struct pt_cached_section *scache, + struct pt_section *section, uint64_t laddr, + int isid) +{ + if (!scache || !section) + return -pte_internal; + + if (scache->section) + return -pte_internal; + + scache->section = section; + scache->laddr = laddr; + scache->isid = isid; + + return 0; +} + +/* Get @scache's cached section. + * + * Check whether @scache contains a section that an image lookup of @ip in @asid + * would return. On success, provides the cached section in @psection and its + * load address in @pladdr. + * + * Returns the section's identifier on success, a negative error code otherwise. + * Returns -pte_internal if @scache, @psection, or @pladdr is NULL. + * Returns -pte_nomap if @scache does not have a section cached. + * Returns -pte_nomap if @scache's cached section does not contain @ip. + */ +static int pt_blk_cached_section(struct pt_cached_section *scache, + struct pt_section **psection, uint64_t *pladdr, + struct pt_image *image, struct pt_asid *asid, + uint64_t ip) +{ + struct pt_section *section; + uint64_t laddr; + int isid, errcode; + + if (!scache || !psection || !pladdr) + return -pte_internal; + + + section = scache->section; + laddr = scache->laddr; + isid = scache->isid; + if (!section) + return -pte_nomap; + + errcode = pt_image_validate(image, asid, ip, section, laddr, isid); + if (errcode < 0) + return errcode; + + *psection = section; + *pladdr = laddr; + + return isid; +} + +static void pt_blk_reset(struct pt_block_decoder *decoder) +{ + if (!decoder) + return; + + decoder->mode = ptem_unknown; + decoder->ip = 0ull; + decoder->status = 0; + decoder->enabled = 0; + decoder->process_event = 0; + decoder->speculative = 0; + + pt_retstack_init(&decoder->retstack); + pt_asid_init(&decoder->asid); +} + +/* Initialize the query decoder flags based on our flags. */ + +static int pt_blk_init_qry_flags(struct pt_conf_flags *qflags, + const struct pt_conf_flags *flags) +{ + if (!qflags || !flags) + return -pte_internal; + + memset(qflags, 0, sizeof(*qflags)); + + return 0; +} + +int pt_blk_decoder_init(struct pt_block_decoder *decoder, + const struct pt_config *uconfig) +{ + struct pt_config config; + int errcode; + + if (!decoder) + return -pte_internal; + + errcode = pt_config_from_user(&config, uconfig); + if (errcode < 0) + return errcode; + + /* The user supplied decoder flags. */ + decoder->flags = config.flags; + + /* Set the flags we need for the query decoder we use. */ + errcode = pt_blk_init_qry_flags(&config.flags, &decoder->flags); + if (errcode < 0) + return errcode; + + errcode = pt_qry_decoder_init(&decoder->query, &config); + if (errcode < 0) + return errcode; + + pt_image_init(&decoder->default_image, NULL); + decoder->image = &decoder->default_image; + + memset(&decoder->scache, 0, sizeof(decoder->scache)); + + pt_blk_reset(decoder); + + return 0; +} + +void pt_blk_decoder_fini(struct pt_block_decoder *decoder) +{ + if (!decoder) + return; + + /* Release the cached section so we don't leak it. 
*/ + (void) pt_blk_scache_invalidate(&decoder->scache); + + pt_image_fini(&decoder->default_image); + pt_qry_decoder_fini(&decoder->query); +} + +struct pt_block_decoder * +pt_blk_alloc_decoder(const struct pt_config *config) +{ + struct pt_block_decoder *decoder; + int errcode; + + decoder = malloc(sizeof(*decoder)); + if (!decoder) + return NULL; + + errcode = pt_blk_decoder_init(decoder, config); + if (errcode < 0) { + free(decoder); + return NULL; + } + + return decoder; +} + +void pt_blk_free_decoder(struct pt_block_decoder *decoder) +{ + if (!decoder) + return; + + pt_blk_decoder_fini(decoder); + free(decoder); +} + +static int pt_blk_start(struct pt_block_decoder *decoder, int status) +{ + if (!decoder) + return -pte_internal; + + if (status < 0) + return status; + + decoder->status = status; + if (!(status & pts_ip_suppressed)) + decoder->enabled = 1; + + return 0; +} + +static int pt_blk_sync_reset(struct pt_block_decoder *decoder) +{ + if (!decoder) + return -pte_internal; + + pt_blk_reset(decoder); + + return 0; +} + +int pt_blk_sync_forward(struct pt_block_decoder *decoder) +{ + int errcode, status; + + if (!decoder) + return -pte_invalid; + + errcode = pt_blk_sync_reset(decoder); + if (errcode < 0) + return errcode; + + status = pt_qry_sync_forward(&decoder->query, &decoder->ip); + + return pt_blk_start(decoder, status); +} + +int pt_blk_sync_backward(struct pt_block_decoder *decoder) +{ + int errcode, status; + + if (!decoder) + return -pte_invalid; + + errcode = pt_blk_sync_reset(decoder); + if (errcode < 0) + return errcode; + + status = pt_qry_sync_backward(&decoder->query, &decoder->ip); + + return pt_blk_start(decoder, status); +} + +int pt_blk_sync_set(struct pt_block_decoder *decoder, uint64_t offset) +{ + int errcode, status; + + if (!decoder) + return -pte_invalid; + + errcode = pt_blk_sync_reset(decoder); + if (errcode < 0) + return errcode; + + status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset); + + return pt_blk_start(decoder, status); +} + +int pt_blk_get_offset(struct pt_block_decoder *decoder, uint64_t *offset) +{ + if (!decoder) + return -pte_invalid; + + return pt_qry_get_offset(&decoder->query, offset); +} + +int pt_blk_get_sync_offset(struct pt_block_decoder *decoder, uint64_t *offset) +{ + if (!decoder) + return -pte_invalid; + + return pt_qry_get_sync_offset(&decoder->query, offset); +} + +struct pt_image *pt_blk_get_image(struct pt_block_decoder *decoder) +{ + if (!decoder) + return NULL; + + return decoder->image; +} + +int pt_blk_set_image(struct pt_block_decoder *decoder, struct pt_image *image) +{ + if (!decoder) + return -pte_invalid; + + if (!image) + image = &decoder->default_image; + + decoder->image = image; + return 0; +} + +const struct pt_config * +pt_blk_get_config(const struct pt_block_decoder *decoder) +{ + if (!decoder) + return NULL; + + return pt_qry_get_config(&decoder->query); +} + +int pt_blk_time(struct pt_block_decoder *decoder, uint64_t *time, + uint32_t *lost_mtc, uint32_t *lost_cyc) +{ + if (!decoder || !time) + return -pte_invalid; + + return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc); +} + +int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder, uint32_t *cbr) +{ + if (!decoder || !cbr) + return -pte_invalid; + + return pt_qry_core_bus_ratio(&decoder->query, cbr); +} + +/* Fetch the next pending event. + * + * Checks for pending events. If an event is pending, fetches it (if not + * already in process). + * + * Returns zero if no event is pending. 
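
The functions above make up the public life cycle of the block decoder: allocate it for a pt_config, synchronize onto the trace, and query blocks until the trace ends. A minimal sketch of that loop; it assumes the public intel-pt.h API, including pt_blk_next() and the pt_config/pt_block layouts, which do not appear in this excerpt, and a real setup would also have to populate the decoder's image with the traced binaries before any block can be decoded:

#include "intel-pt.h"

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int dump_blocks(uint8_t *trace, size_t size)
{
	struct pt_block_decoder *decoder;
	struct pt_config config;
	int status;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.begin = trace;
	config.end = trace + size;

	decoder = pt_blk_alloc_decoder(&config);
	if (!decoder)
		return -1;

	/* One iteration per synchronization point (PSB) in the trace. */
	while ((status = pt_blk_sync_forward(decoder)) >= 0) {
		/* One iteration per block of instructions. */
		for (;;) {
			struct pt_block block;

			status = pt_blk_next(decoder, &block, sizeof(block));
			if (status < 0)
				break;

			printf("[%" PRIx64 "; %" PRIx64 "] %u insn\n",
			       block.ip, block.end_ip,
			       (unsigned) block.ninsn);
		}
	}

	pt_blk_free_decoder(decoder);
	return 0;
}
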
+ * Returns a positive integer if an event is pending or in process. + * Returns a negative error code otherwise. + */ +static inline int pt_blk_fetch_event(struct pt_block_decoder *decoder) +{ + int status; + + if (!decoder) + return -pte_internal; + + if (decoder->process_event) + return 1; + + if (!(decoder->status & pts_event_pending)) + return 0; + + status = pt_qry_event(&decoder->query, &decoder->event, + sizeof(decoder->event)); + if (status < 0) + return status; + + decoder->process_event = 1; + decoder->status = status; + + return 1; +} + +static inline int pt_blk_block_is_empty(const struct pt_block *block) +{ + if (!block) + return 1; + + return !block->ninsn; +} + +static inline int block_to_user(struct pt_block *ublock, size_t size, + const struct pt_block *block) +{ + if (!ublock || !block) + return -pte_internal; + + if (ublock == block) + return 0; + + /* Zero out any unknown bytes. */ + if (sizeof(*block) < size) { + memset(ublock + sizeof(*block), 0, size - sizeof(*block)); + + size = sizeof(*block); + } + + memcpy(ublock, block, size); + + return 0; +} + +static int pt_insn_false(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + (void) insn; + (void) iext; + + return 0; +} + +/* Determine the next IP using trace. + * + * Tries to determine the IP of the next instruction using trace and provides it + * in @pip. + * + * Not requiring trace to determine the IP is treated as an internal error. + * + * Does not update the return compression stack for indirect calls. This is + * expected to have been done, already, when trying to determine the next IP + * without using trace. + * + * Does not update @decoder->status. The caller is expected to do that. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + * Returns -pte_internal if @pip, @decoder, @insn, or @iext are NULL. + * Returns -pte_internal if no trace is required. + */ +static int pt_blk_next_ip(uint64_t *pip, struct pt_block_decoder *decoder, + const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + int status; + + if (!pip || !decoder || !insn || !iext) + return -pte_internal; + + /* We handle non-taken conditional branches, and compressed returns + * directly in the switch. + * + * All kinds of branches are handled below the switch. + */ + switch (insn->iclass) { + case ptic_cond_jump: { + uint64_t ip; + int taken; + + status = pt_qry_cond_branch(&decoder->query, &taken); + if (status < 0) + return status; + + ip = insn->ip + insn->size; + if (taken) + ip += iext->variant.branch.displacement; + + *pip = ip; + return status; + } + + case ptic_return: { + int taken, errcode; + + /* Check for a compressed return. */ + status = pt_qry_cond_branch(&decoder->query, &taken); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + break; + } + + /* A compressed return is indicated by a taken conditional + * branch. + */ + if (!taken) + return -pte_bad_retcomp; + + errcode = pt_retstack_pop(&decoder->retstack, pip); + if (errcode < 0) + return errcode; + + return status; + } + + case ptic_jump: + case ptic_call: + /* A direct jump or call wouldn't require trace. */ + if (iext->variant.branch.is_direct) + return -pte_internal; + + break; + + case ptic_far_call: + case ptic_far_return: + case ptic_far_jump: + break; + + case ptic_other: + return -pte_internal; + + case ptic_error: + return -pte_bad_insn; + } + + /* Process an indirect branch. 
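
In the ptic_cond_jump case above, the next IP is pure arithmetic once the query decoder has said whether the branch was taken: fall through to @insn->ip + @insn->size, and add the branch displacement only for the taken case. A tiny standalone sketch of that computation (the field widths are illustrative):

#include <stdint.h>
#include <stdio.h>

uint64_t cond_jump_target(uint64_t ip, uint8_t size, int32_t displacement,
			  int taken)
{
	/* Fall-through address first, ... */
	uint64_t next_ip = ip + size;

	/* ... then add the sign-extended displacement if the branch was
	 * taken.
	 */
	if (taken)
		next_ip += (uint64_t) (int64_t) displacement;

	return next_ip;
}

int main(void)
{
	/* A 2-byte jnz -0x10 at 0x1000, taken: the target is 0xff2. */
	printf("%#llx\n",
	       (unsigned long long) cond_jump_target(0x1000, 2, -0x10, 1));
	return 0;
}
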
+ * + * This covers indirect jumps and calls, non-compressed returns, and all + * flavors of far transfers. + */ + return pt_qry_indirect_branch(&decoder->query, pip); +} + +/* Process an enabled event. + * + * Determines whether the enabled event can be processed in this iteration or + * has to be postponed. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_enabled(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* We must have an IP in order to start decoding. */ + if (ev->ip_suppressed) + return -pte_noip; + + /* We must currently be disabled. */ + if (decoder->enabled) + return -pte_bad_context; + + /* Delay processing of the event if the block is already in progress. */ + if (!pt_blk_block_is_empty(block)) + return 0; + + /* Check if we resumed from a preceding disable or if we enabled at a + * different position. + */ + if (ev->variant.enabled.ip == decoder->ip && !block->enabled) + block->resumed = 1; + else { + block->enabled = 1; + block->resumed = 0; + } + + /* Clear an indication of a preceding disable. */ + block->disabled = 0; + + block->ip = decoder->ip = ev->variant.enabled.ip; + decoder->enabled = 1; + decoder->process_event = 0; + + return 1; +} + +/* Apply a disabled event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_apply_disabled(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* We must currently be enabled. */ + if (!decoder->enabled) + return -pte_bad_context; + + /* We preserve @decoder->ip. This is where we expect tracing to resume + * and we'll indicate that on the subsequent enabled event if tracing + * actually does resume from there. + */ + decoder->enabled = 0; + decoder->process_event = 0; + + block->disabled = 1; + + return 0; +} + +/* Process a disabled event. + * + * We reached the location of a disabled event. This ends a non-empty block. + * + * We may see disabled events for empty blocks when we have a series of enables + * and disables on the same IP without any trace in between. We ignore the + * disabled event in this case and proceed. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_disabled(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + int errcode; + + if (!block) + return -pte_internal; + + errcode = pt_blk_apply_disabled(decoder, block, ev); + if (errcode < 0) + return errcode; + + /* The event completes a non-empty block. */ + if (!pt_blk_block_is_empty(block)) + return 0; + + /* Ignore the disable if the block is empty. */ + block->disabled = 0; + + return 1; +} + +/* Apply an asynchronous branch event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise.
+ */ +static int pt_blk_apply_async_branch(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* We must currently be enabled. */ + if (!decoder->enabled) + return -pte_bad_context; + + /* Indicate the async branch as an interrupt. This ends the block. */ + block->interrupted = 1; + + /* Jump to the branch destination. We will continue from there in the + * next iteration. + */ + decoder->ip = ev->variant.async_branch.to; + decoder->process_event = 0; + + return 0; +} + +/* Process an asynchronous branch event. + * + * We reached the source location of an asynchronous branch. This ends a + * non-empty block. + * + * We may come across an asynchronous branch for an empty block, e.g. when + * tracing just started. We ignore the event in that case and proceed. It will + * look like tracing started at the asynchronous branch destination instead of + * at its source. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_async_branch(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + int errcode; + + if (!block) + return -pte_internal; + + errcode = pt_blk_apply_async_branch(decoder, block, ev); + if (errcode < 0) + return errcode; + + if (!pt_blk_block_is_empty(block)) + return 0; + + /* We may still change the start IP for an empty block. Do not indicate + * the interrupt in this case. + */ + block->interrupted = 0; + block->ip = decoder->ip; + + return 1; +} + +/* Apply a paging event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_apply_paging(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + (void) block; + + if (!decoder || !ev) + return -pte_internal; + + decoder->asid.cr3 = ev->variant.paging.cr3; + decoder->process_event = 0; + + return 0; +} + +/* Apply a vmcs event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_apply_vmcs(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + (void) block; + + if (!decoder || !ev) + return -pte_internal; + + decoder->asid.vmcs = ev->variant.vmcs.base; + decoder->process_event = 0; + + return 0; +} + +/* Process an overflow event. + * + * An overflow ends a non-empty block. The overflow itself is indicated in the + * next block. Indicate the overflow and resume in this case. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_overflow(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* The overflow ends a non-empty block. We will process the event in + * the next iteration. + */ + if (!pt_blk_block_is_empty(block)) + return 0; + + /* If the IP is suppressed, the overflow resolved while tracing was + * disabled. 
Otherwise it resolved while tracing was enabled. + */ + if (ev->ip_suppressed) { + /* Tracing is disabled. It doesn't make sense to preserve the + * previous IP. This will just be misleading. Even if tracing + * had been disabled before, as well, we might have missed the + * re-enable in the overflow. + */ + decoder->enabled = 0; + decoder->ip = 0ull; + + /* Indicate the overflow. Since tracing is disabled, the block + * will remain empty until tracing gets re-enabled again. + * + * When the block is eventually returned it will have the resync + * and the enabled bit set to indicate that the overflow resolved + * before tracing was enabled. + */ + block->resynced = 1; + } else { + /* Tracing is enabled and we're at the IP at which the overflow + * resolved. + */ + decoder->enabled = 1; + decoder->ip = ev->variant.overflow.ip; + + /* Indicate the overflow and set the start IP. The block is + * empty so we may still change it. + * + * We do not indicate a tracing enable if tracing had been + * disabled before to distinguish this from the above case. + */ + block->resynced = 1; + block->ip = decoder->ip; + } + + /* We don't know the TSX state. Let's assume we execute normally. + * + * We also don't know the execution mode. Let's keep what we have + * in case we don't get an update before we have to decode the next + * instruction. + */ + decoder->speculative = 0; + decoder->process_event = 0; + + return 1; +} + +/* Apply an exec mode event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_apply_exec_mode(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + enum pt_exec_mode mode; + + if (!decoder || !block || !ev) + return -pte_internal; + + /* Use status update events to diagnose inconsistencies. */ + mode = ev->variant.exec_mode.mode; + if (ev->status_update && decoder->enabled && + decoder->mode != ptem_unknown && decoder->mode != mode) + return -pte_bad_status_update; + + decoder->mode = mode; + decoder->process_event = 0; + + return 0; +} + +/* Process an exec mode event. + * + * We reached the location of an exec mode event. Update the exec mode and + * proceed. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_exec_mode(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + int errcode; + + if (!decoder || !block) + return -pte_internal; + + errcode = pt_blk_apply_exec_mode(decoder, block, ev); + if (errcode < 0) + return errcode; + + /* An execution mode change ends a non-empty block. */ + if (!pt_blk_block_is_empty(block)) + return 0; + + /* We may still change the execution mode of an empty block. */ + block->mode = decoder->mode; + + return 1; +} + +/* Apply a tsx event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise.
+ */ +static int pt_blk_apply_tsx(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + decoder->speculative = ev->variant.tsx.speculative; + decoder->process_event = 0; + + if (decoder->enabled && !pt_blk_block_is_empty(block)) { + if (ev->variant.tsx.aborted) + block->aborted = 1; + else if (block->speculative && !ev->variant.tsx.speculative) + block->committed = 1; + } + + return 0; +} + +/* Process a tsx event. + * + * We reached the location of a tsx event. A speculation mode change ends a + * non-empty block. Indicate commit or abort in the ended block. + * + * We might see tsx event while tracing is disabled or for empty blocks, e.g. if + * tracing was just enabled. In this case we do not indicate the abort or + * commit. + * + * Returns a positive integer if the event has been processed. + * Returns zero if the event shall be postponed. + * Returns a negative error code otherwise. + */ +static int pt_blk_process_tsx(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + int errcode; + + if (!decoder || !block) + return -pte_internal; + + errcode = pt_blk_apply_tsx(decoder, block, ev); + if (errcode < 0) + return errcode; + + /* A speculation mode change ends a non-empty block. */ + if (!pt_blk_block_is_empty(block)) + return 0; + + /* We may still change the speculation mode of an empty block. */ + block->speculative = decoder->speculative; + + return 1; +} + +/* Apply a stop event. + * + * This is used for proceed events and for trailing events. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_apply_stop(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* Tracing is always disabled before it is stopped. */ + if (decoder->enabled) + return -pte_bad_context; + + decoder->process_event = 0; + + /* Indicate the stop. */ + block->stopped = 1; + + return 0; +} + +/* Proceed to the next IP using trace. + * + * We failed to proceed without trace. This ends the current block. Now use + * trace to do one final step to determine the start IP of the next block. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_with_trace(struct pt_block_decoder *decoder, + const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + int status; + + if (!decoder) + return -pte_internal; + + status = pt_blk_next_ip(&decoder->ip, decoder, insn, iext); + if (status < 0) + return status; + + /* Preserve the query decoder's response which indicates upcoming + * events. + */ + decoder->status = status; + + /* We do need an IP in order to proceed. */ + if (status & pts_ip_suppressed) + return -pte_noip; + + return 0; +} + +/* Decode one instruction in a known section. + * + * Decode the instruction at @insn->ip in @section loaded at @laddr assuming + * execution mode @insn->mode. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_decode_in_section(struct pt_insn *insn, + struct pt_insn_ext *iext, + const struct pt_section *section, + uint64_t laddr) +{ + int status; + + if (!insn || !iext) + return -pte_internal; + + /* We know that @ip is contained in @section. + * + * Note that we need to translate @ip into a section offset. 
+ */ + status = pt_section_read(section, insn->raw, sizeof(insn->raw), + insn->ip - laddr); + if (status < 0) + return status; + + /* We initialize @insn->size to the maximal possible size. It will be + * set to the actual size during instruction decode. + */ + insn->size = (uint8_t) status; + + return pt_ild_decode(insn, iext); +} + +/* Update the return-address stack if @insn is a near call. + * + * Returns zero on success, a negative error code otherwise. + */ +static inline int pt_blk_log_call(struct pt_block_decoder *decoder, + const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + if (!decoder || !insn || !iext) + return -pte_internal; + + if (insn->iclass != ptic_call) + return 0; + + /* Ignore direct calls to the next instruction that are used for + * position independent code. + */ + if (iext->variant.branch.is_direct && + !iext->variant.branch.displacement) + return 0; + + return pt_retstack_push(&decoder->retstack, insn->ip + insn->size); +} + +/* Proceed by one instruction. + * + * Tries to decode the instruction at @decoder->ip and, on success, adds it to + * @block and provides it in @pinsn and @piext. + * + * The instruction will not be added if: + * + * - the memory could not be read: return error + * - it could not be decoded: return error + * - @block is already full: return zero + * - @block would switch sections: return zero + * + * Returns a positive integer if the instruction was added. + * Returns zero if the instruction didn't fit into @block. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_one_insn(struct pt_block_decoder *decoder, + struct pt_block *block, + struct pt_insn *pinsn, + struct pt_insn_ext *piext) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + uint16_t ninsn; + int status; + + if (!decoder || !block || !pinsn || !piext) + return -pte_internal; + + /* There's nothing to do if there is no room in @block. */ + ninsn = block->ninsn + 1; + if (!ninsn) + return 0; + + /* The truncated instruction must be last. */ + if (block->truncated) + return 0; + + memset(&insn, 0, sizeof(insn)); + memset(&iext, 0, sizeof(iext)); + + insn.mode = decoder->mode; + insn.ip = decoder->ip; + + status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid); + if (status < 0) + return status; + + /* We do not switch sections inside a block. */ + if (insn.isid != block->isid) { + if (!pt_blk_block_is_empty(block)) + return 0; + + block->isid = insn.isid; + } + + /* If we couldn't read @insn's memory in one chunk from @insn.isid, we + * provide the memory in @block. + */ + if (insn.truncated) { + memcpy(block->raw, insn.raw, insn.size); + block->size = insn.size; + block->truncated = 1; + } + + /* Log calls' return addresses for return compression. */ + status = pt_blk_log_call(decoder, &insn, &iext); + if (status < 0) + return status; + + /* We have a new instruction. */ + block->iclass = insn.iclass; + block->end_ip = insn.ip; + block->ninsn = ninsn; + + *pinsn = insn; + *piext = iext; + + return 1; +} + + +/* Proceed to a particular type of instruction without using trace. + * + * Proceed until we reach an instruction for which @predicate returns a positive + * integer or until: + * + * - @predicate returns an error: return error + * - @block is full: return zero + * - @block would switch sections: return zero + * - we would need trace: return -pte_bad_query + * + * Provide the last instruction that was reached in @insn and @iext. + * + * Update @decoder->ip to point to the last IP that was reached. 
If we fail due + * to lack of trace or if we reach a desired instruction, this is @insn->ip; + * otherwise this is the next instruction's IP. + * + * Returns a positive integer if a suitable instruction was reached. + * Returns zero if no such instruction was reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_insn(struct pt_block_decoder *decoder, + struct pt_block *block, + struct pt_insn *insn, + struct pt_insn_ext *iext, + int (*predicate)(const struct pt_insn *, + const struct pt_insn_ext *)) +{ + int status; + + if (!decoder || !insn || !predicate) + return -pte_internal; + + for (;;) { + status = pt_blk_proceed_one_insn(decoder, block, insn, iext); + if (status <= 0) + return status; + + /* We're done if this instruction matches the spec (positive + * status) or we run into an error (negative status). + */ + status = predicate(insn, iext); + if (status != 0) + return status; + + /* Let's see if we can proceed to the next IP without trace. */ + status = pt_insn_next_ip(&decoder->ip, insn, iext); + if (status < 0) + return status; + + /* End the block if the user asked us to. + * + * We only need to take care about direct near calls. Indirect + * and far calls require trace and will naturally end a block. + */ + if (decoder->flags.variant.block.end_on_call && + (insn->iclass == ptic_call)) + return 0; + } +} + +/* Proceed to a particular IP without using trace. + * + * Proceed until we reach @ip or until: + * + * - @block is full: return zero + * - @block would switch sections: return zero + * - we would need trace: return -pte_bad_query + * + * Provide the last instruction that was reached in @insn and @iext. If we + * reached @ip, this is the instruction preceding it. + * + * Update @decoder->ip to point to the last IP that was reached. If we fail due + * to lack of trace, this is @insn->ip; otherwise this is the next instruction's + * IP. + * + * Returns a positive integer if @ip was reached. + * Returns zero if no such instruction was reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_ip(struct pt_block_decoder *decoder, + struct pt_block *block, struct pt_insn *insn, + struct pt_insn_ext *iext, uint64_t ip) +{ + int status; + + if (!decoder || !insn) + return -pte_internal; + + for (;;) { + /* We're done when we reach @ip. We may not even have to decode + * a single instruction in some cases. + */ + if (decoder->ip == ip) + return 1; + + status = pt_blk_proceed_one_insn(decoder, block, insn, iext); + if (status <= 0) + return status; + + /* Let's see if we can proceed to the next IP without trace. */ + status = pt_insn_next_ip(&decoder->ip, insn, iext); + if (status < 0) + return status; + + /* End the block if the user asked us to. + * + * We only need to take care about direct near calls. Indirect + * and far calls require trace and will naturally end a block. + */ + if (decoder->flags.variant.block.end_on_call && + (insn->iclass == ptic_call)) + return 0; + } +} + +/* Proceed to a particular IP with trace, if necessary. + * + * Proceed until we reach @ip or until: + * + * - @block is full: return zero + * - @block would switch sections: return zero + * - we need trace: return zero + * + * Update @decoder->ip to point to the last IP that was reached. + * + * A return of zero ends @block. + * + * Returns a positive integer if @ip was reached. + * Returns zero if no such instruction was reached. + * Returns a negative error code otherwise. 
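
For return compression, pt_blk_log_call() above pushes the return address (@insn->ip + @insn->size) for near calls, skipping the call-to-the-next-instruction idiom used by position-independent code, and pt_blk_next_ip() pops it again when the query decoder reports a compressed return. A minimal sketch of such a bounded return-address stack; the real pt_retstack has its own layout, limits, and error codes:

#include <stdint.h>

enum { retstack_size = 64 };

struct retstack {
	uint64_t entry[retstack_size];
	uint8_t top;	/* index of the next free slot */
	uint8_t filled;	/* number of valid entries */
};

/* Callers zero-initialize the structure before use. */

void retstack_push(struct retstack *rs, uint64_t return_ip)
{
	/* On overflow, silently drop the oldest entry. */
	rs->entry[rs->top] = return_ip;
	rs->top = (uint8_t) ((rs->top + 1) % retstack_size);

	if (rs->filled < retstack_size)
		rs->filled += 1;
}

int retstack_pop(struct retstack *rs, uint64_t *return_ip)
{
	if (!rs->filled)
		return -1;	/* empty: the caller must query the trace */

	rs->top = (uint8_t) ((rs->top + retstack_size - 1) % retstack_size);
	rs->filled -= 1;

	*return_ip = rs->entry[rs->top];
	return 0;
}
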
+ */ +static int pt_blk_proceed_to_ip_with_trace(struct pt_block_decoder *decoder, + struct pt_block *block, + uint64_t ip) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int status; + + /* Try to reach @ip without trace. + * + * We're also OK if @block overflowed or we switched sections and we + * have to try again in the next iteration. + */ + status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext, ip); + if (status != -pte_bad_query) + return status; + + /* Needing trace is not an error. We use trace to determine the next + * start IP and end the block. + */ + return pt_blk_proceed_with_trace(decoder, &insn, &iext); +} + +/* Proceed to the event location for a disabled event. + * + * We have a (synchronous) disabled event pending. Proceed to the event + * location and indicate whether we were able to reach it. + * + * The last instruction that was reached is stored in @insn/@iext. + * + * Returns a positive integer if the event location was reached. + * Returns zero if the event location was not reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_disabled(struct pt_block_decoder *decoder, + struct pt_block *block, + struct pt_insn *insn, + struct pt_insn_ext *iext, + const struct pt_event *ev) +{ + if (!decoder || !block || !ev) + return -pte_internal; + + + if (ev->ip_suppressed) { + /* A synchronous disabled event also binds to far branches and + * CPL-changing instructions. Both would require trace, + * however, and are thus implicitly handled by erroring out. + * + * The would-require-trace error is handled by our caller. + */ + return pt_blk_proceed_to_insn(decoder, block, insn, iext, + pt_insn_changes_cr3); + } else + return pt_blk_proceed_to_ip(decoder, block, insn, iext, + ev->variant.disabled.ip); +} + +/* Proceed to the event location for an async paging event. + * + * We have an async paging event pending. Proceed to the event location and + * indicate whether we were able to reach it. Needing trace in order to proceed + * is not an error in this case but ends the block. + * + * Returns a positive integer if the event location was reached. + * Returns zero if the event location was not reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_async_paging(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!ev) + return -pte_internal; + + /* Apply the event immediately if we don't have an IP. */ + if (ev->ip_suppressed) + return 1; + + return pt_blk_proceed_to_ip_with_trace(decoder, block, + ev->variant.async_paging.ip); +} + +/* Proceed to the event location for an async vmcs event. + * + * We have an async vmcs event pending. Proceed to the event location and + * indicate whether we were able to reach it. Needing trace in order to proceed + * is not an error in this case but ends the block. + * + * Returns a positive integer if the event location was reached. + * Returns zero if the event location was not reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_async_vmcs(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!ev) + return -pte_internal; + + /* Apply the event immediately if we don't have an IP. */ + if (ev->ip_suppressed) + return 1; + + return pt_blk_proceed_to_ip_with_trace(decoder, block, + ev->variant.async_vmcs.ip); +} + +/* Proceed to the event location for an exec mode event. + * + * We have an exec mode event pending. 
Proceed to the event location and + * indicate whether we were able to reach it. Needing trace in order to proceed + * is not an error in this case but ends the block. + * + * Returns a positive integer if the event location was reached. + * Returns zero if the event location was not reached. + * Returns a negative error code otherwise. + */ +static int pt_blk_proceed_to_exec_mode(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!ev) + return -pte_internal; + + /* Apply the event immediately if we don't have an IP. */ + if (ev->ip_suppressed) + return 1; + + return pt_blk_proceed_to_ip_with_trace(decoder, block, + ev->variant.exec_mode.ip); +} + +/* Try to work around erratum SKD022. + * + * If we get an asynchronous disable on VMLAUNCH or VMRESUME, the FUP that + * caused the disable to be asynchronous might have been bogous. + * + * Returns a positive integer if the erratum has been handled. + * Returns zero if the erratum does not apply. + * Returns a negative error code otherwise. + */ +static int pt_blk_handle_erratum_skd022(struct pt_block_decoder *decoder, + struct pt_event *ev) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + if (!decoder || !ev) + return -pte_internal; + + insn.mode = decoder->mode; + insn.ip = ev->variant.async_disabled.at; + + errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid); + if (errcode < 0) + return 0; + + switch (iext.iclass) { + default: + /* The erratum does not apply. */ + return 0; + + case PTI_INST_VMLAUNCH: + case PTI_INST_VMRESUME: + /* The erratum may apply. We can't be sure without a lot more + * analysis. Let's assume it does. + * + * We turn the async disable into a sync disable. Our caller + * will restart event processing. + */ + ev->type = ptev_disabled; + ev->variant.disabled.ip = ev->variant.async_disabled.ip; + + return 1; + } +} + +/* Proceed to the next event. + * + * We have an event pending. Proceed to the event location and either process + * the event and continue or postpone the event to the next block. + * + * On our way to the event location we may also be forced to postpone the event + * to the next block, e.g. if we overflow the number of instructions in the + * block or if we need trace in order to reach the event location. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_event(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + if (!decoder || !block) + return -pte_internal; + + for (;;) { + struct pt_insn_ext iext; + struct pt_insn insn; + struct pt_event *ev; + uint64_t ip; + int status; + + if (!decoder->process_event) + return -pte_internal; + + ev = &decoder->event; + switch (ev->type) { + case ptev_enabled: + status = pt_blk_process_enabled(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_disabled: + status = pt_blk_proceed_to_disabled(decoder, block, + &insn, &iext, ev); + if (status <= 0) { + /* A synchronous disable event also binds to the + * next indirect or conditional branch, i.e. to + * any branch that would have required trace. + */ + if (status != -pte_bad_query) + return status; + + /* The @decoder->ip still points to the indirect + * or conditional branch instruction that caused + * us to error out. That's not where we expect + * tracing to resume since the instruction + * already retired. + * + * For calls, a fair assumption is that tracing + * resumes after returning from the called + * function. 
For other types of instructions, + * we simply don't know. + */ + switch (insn.iclass) { + case ptic_call: + case ptic_far_call: + decoder->ip = insn.ip + insn.size; + break; + + default: + decoder->ip = 0ull; + break; + } + } + + status = pt_blk_process_disabled(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_async_disabled: + ip = ev->variant.async_disabled.at; + + status = pt_blk_proceed_to_ip(decoder, block, &insn, + &iext, ip); + if (status <= 0) + return status; + + if (decoder->query.config.errata.skd022) { + status = pt_blk_handle_erratum_skd022(decoder, + ev); + if (status != 0) { + if (status < 0) + return status; + + /* If the erratum hits, we modify the + * event. Try again. + */ + continue; + } + } + + status = pt_blk_process_disabled(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_async_branch: + ip = ev->variant.async_branch.from; + + status = pt_blk_proceed_to_ip(decoder, block, &insn, + &iext, ip); + if (status <= 0) + return status; + + status = pt_blk_process_async_branch(decoder, block, + ev); + if (status <= 0) + return status; + + break; + + case ptev_paging: + if (!decoder->enabled) { + status = pt_blk_apply_paging(decoder, block, + ev); + if (status < 0) + return status; + + break; + } + + status = pt_blk_proceed_to_insn(decoder, block, &insn, + &iext, + pt_insn_binds_to_pip); + if (status <= 0) + return status; + + status = pt_blk_apply_paging(decoder, block, ev); + if (status < 0) + return status; + + /* We accounted for @insn in @block but we have not + * updated @decoder->ip, yet. Let's do so now. + * + * If we can't, we have to proceed with trace. This + * ends event processing. + */ + status = pt_insn_next_ip(&decoder->ip, &insn, &iext); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + return pt_blk_proceed_with_trace(decoder, &insn, + &iext); + } + + break; + + case ptev_async_paging: + status = pt_blk_proceed_to_async_paging(decoder, block, + ev); + if (status <= 0) + return status; + + status = pt_blk_apply_paging(decoder, block, ev); + if (status < 0) + return status; + + break; + + case ptev_vmcs: + if (!decoder->enabled) { + status = pt_blk_apply_vmcs(decoder, block, ev); + if (status < 0) + return status; + + break; + } + + status = pt_blk_proceed_to_insn(decoder, block, &insn, + &iext, + pt_insn_binds_to_vmcs); + if (status <= 0) + return status; + + status = pt_blk_apply_vmcs(decoder, block, ev); + if (status < 0) + return status; + + /* We accounted for @insn in @block but we have not + * updated @decoder->ip, yet. Let's do so now. + * + * If we can't, we have to proceed with trace. This + * ends event processing. 
+ */ + status = pt_insn_next_ip(&decoder->ip, &insn, &iext); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + return pt_blk_proceed_with_trace(decoder, &insn, + &iext); + } + + break; + + case ptev_async_vmcs: + status = pt_blk_proceed_to_async_vmcs(decoder, block, + ev); + if (status <= 0) + return status; + + status = pt_blk_apply_vmcs(decoder, block, ev); + if (status < 0) + return status; + + break; + + case ptev_overflow: + status = pt_blk_process_overflow(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_exec_mode: + status = pt_blk_proceed_to_exec_mode(decoder, block, + ev); + if (status <= 0) + return status; + + status = pt_blk_process_exec_mode(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_tsx: + if (!ev->ip_suppressed) { + ip = ev->variant.tsx.ip; + + status = pt_blk_proceed_to_ip(decoder, block, + &insn, &iext, ip); + if (status <= 0) + return status; + } + + status = pt_blk_process_tsx(decoder, block, ev); + if (status <= 0) + return status; + + break; + + case ptev_stop: + status = pt_blk_apply_stop(decoder, block, ev); + if (status < 0) + return status; + + break; + } + + /* We should have processed the event. If we have not, we might + * spin here forever. + */ + if (decoder->process_event) + return -pte_internal; + + /* Check if we have more events pending. */ + status = pt_blk_fetch_event(decoder); + if (status <= 0) { + if (status < 0) + return status; + + break; + } + } + + return pt_blk_proceed_no_event(decoder, block); +} + +/* Proceed to the next decision point without using the block cache. + * + * Tracing is enabled and we don't have an event pending. Proceed as far as + * we get without trace. Stop when we either: + * + * - need trace in order to continue + * - overflow the max number of instructions in a block + * + * We actually proceed one instruction further to get the start IP for the next + * block. This only updates @decoder's internal state, though. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_no_event_uncached(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int status; + + if (!decoder || !block) + return -pte_internal; + + /* This is overly conservative, really. We shouldn't get a bad-query + * status unless we decoded at least one instruction successfully. + */ + memset(&insn, 0, sizeof(insn)); + memset(&iext, 0, sizeof(iext)); + + /* Proceed as far as we get without trace. */ + status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext, + pt_insn_false); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + return pt_blk_proceed_with_trace(decoder, &insn, &iext); + } + + return 0; +} + +/* Check if @ip is contained in @section loaded at @laddr. + * + * Returns non-zero if it is. + * Returns zero if it isn't or of @section is NULL. + */ +static inline int pt_blk_is_in_section(uint64_t ip, + const struct pt_section *section, + uint64_t laddr) +{ + uint64_t begin, end; + + begin = laddr; + end = begin + pt_section_size(section); + + return (begin <= ip && ip < end); +} + +/* Insert a trampoline block cache entry. + * + * Add a trampoline block cache entry at @ip to continue at @nip, where @nip + * must be the next instruction after @ip. + * + * Both @ip and @nip must be section-relative + * + * Returns zero on success, a negative error code otherwise. 
+ */ +static inline int pt_blk_add_trampoline(struct pt_block_cache *bcache, + uint64_t ip, uint64_t nip, + enum pt_exec_mode mode) +{ + struct pt_bcache_entry bce; + int64_t disp; + + /* The displacement from @ip to @nip for the trampoline. */ + disp = (int64_t) (nip - ip); + + memset(&bce, 0, sizeof(bce)); + bce.displacement = (int32_t) disp; + bce.ninsn = 1; + bce.mode = mode; + bce.qualifier = ptbq_again; + + /* If we can't reach @nip without overflowing the displacement field, we + * have to stop and re-decode the instruction at @ip. + */ + if ((int64_t) bce.displacement != disp) { + + memset(&bce, 0, sizeof(bce)); + bce.ninsn = 1; + bce.mode = mode; + bce.qualifier = ptbq_decode; + } + + return pt_bcache_add(bcache, ip, bce); +} + +enum { + /* The maximum number of steps when filling the block cache. */ + bcache_fill_steps = 0x400 +}; + +/* Proceed to the next instruction and fill the block cache for @decoder->ip. + * + * Tracing is enabled and we don't have an event pending. The current IP is not + * yet cached. + * + * Proceed one instruction without using the block cache, then try to proceed + * further using the block cache. + * + * On our way back, add a block cache entry for the IP before proceeding. Note + * that the recursion is bounded by @steps and ultimately by the maximum number + * of instructions in a block. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_no_event_fill_cache(struct pt_block_decoder *decoder, + struct pt_block *block, + struct pt_block_cache *bcache, + struct pt_section *section, + uint64_t laddr, size_t steps) +{ + struct pt_bcache_entry bce; + struct pt_insn_ext iext; + struct pt_insn insn; + uint64_t nip, dip; + int64_t disp; + int status; + + if (!decoder || !steps) + return -pte_internal; + + /* Proceed one instruction by decoding and examining it. + * + * Note that we also return on a status of zero that indicates that the + * instruction didn't fit into @block. + */ + status = pt_blk_proceed_one_insn(decoder, block, &insn, &iext); + if (status <= 0) + return status; + + /* Let's see if we can proceed to the next IP without trace. + * + * If we can't, this is certainly a decision point. + */ + status = pt_insn_next_ip(&decoder->ip, &insn, &iext); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + memset(&bce, 0, sizeof(bce)); + bce.ninsn = 1; + bce.mode = insn.mode; + bce.isize = insn.size; + + /* Clear the instruction size in case of overflows. */ + if ((uint8_t) bce.isize != insn.size) + bce.isize = 0; + + switch (insn.iclass) { + case ptic_error: + case ptic_other: + return -pte_internal; + + case ptic_jump: + /* A direct jump doesn't require trace. */ + if (iext.variant.branch.is_direct) + return -pte_internal; + + bce.qualifier = ptbq_indirect; + break; + + case ptic_call: + /* A direct call doesn't require trace. */ + if (iext.variant.branch.is_direct) + return -pte_internal; + + bce.qualifier = ptbq_ind_call; + break; + + case ptic_return: + bce.qualifier = ptbq_return; + break; + + case ptic_cond_jump: + bce.qualifier = ptbq_cond; + break; + + case ptic_far_call: + case ptic_far_return: + case ptic_far_jump: + bce.qualifier = ptbq_indirect; + break; + } + + /* If the block was truncated, we have to decode its last + * instruction each time. + * + * We could have skipped the above switch and size assignment in + * this case but this is already a slow and hopefully infrequent + * path. 
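The same cast-and-compare idiom appears twice above: once for bce.isize and once for the trampoline displacement in pt_blk_add_trampoline(). A 64-bit displacement fits the 32-bit bcache field exactly when truncating it and widening it back is lossless. A minimal, self-contained version of that check, for illustration only:

#include <stdint.h>

/* Non-zero if @disp survives truncation to the 32-bit bcache displacement
 * field; zero if the ptbq_decode fallback is needed instead.
 */
static int disp_fits_bcache(int64_t disp)
{
        int32_t trunc = (int32_t) disp;

        return (int64_t) trunc == disp;
}

When the check fails, pt_blk_add_trampoline() falls back to a ptbq_decode entry so the instruction at @ip is re-decoded instead of being skipped with a wrong displacement.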
+ */ + if (block->truncated) + bce.qualifier = ptbq_decode; + + status = pt_bcache_add(bcache, insn.ip - laddr, bce); + if (status < 0) + return status; + + return pt_blk_proceed_with_trace(decoder, &insn, &iext); + } + + /* The next instruction's IP. */ + nip = decoder->ip; + + /* Even if we were able to proceed without trace, we might have to stop + * here for various reasons: + * + * - at near direct calls to update the return-address stack + * + * We are forced to re-decode @insn to get the branch displacement. + * + * Even though it is constant, we don't cache it to avoid increasing + * the size of a cache entry. Note that the displacement field is + * zero for this entry and we might be tempted to use it - but other + * entries that point to this decision point will have non-zero + * displacement. + * + * We could proceed after a near direct call but we migh as well + * postpone it to the next iteration. Make sure to end the block if + * @decoder->flags.variant.block.end_on_call is set, though. + * + * - if we switched sections + * + * This ends a block just like a branch that requires trace. + * + * We need to re-decode @insn in order to determine the start IP of + * the next block. + * + * - if the block is truncated + * + * We need to read the last instruction's memory from multiple + * sections and provide it to the user. + * + * We could still use the block cache but then we'd have to handle + * this case for each qualifier. Truncation is hopefully rare and + * having to read the memory for the instruction from multiple + * sections is already slow. Let's rather keep things simple and + * route it through the decode flow, where we already have + * everything in place. + */ + if (insn.iclass == ptic_call || + !pt_blk_is_in_section(nip, section, laddr) || block->truncated) { + + memset(&bce, 0, sizeof(bce)); + bce.ninsn = 1; + bce.mode = insn.mode; + bce.qualifier = ptbq_decode; + + return pt_bcache_add(bcache, insn.ip - laddr, bce); + } + + /* We proceeded one instruction. Let's see if we have a cache entry for + * the next instruction. + */ + status = pt_bcache_lookup(&bce, bcache, nip - laddr); + if (status < 0) + return status; + + /* If we don't have a valid cache entry, yet, fill the cache some more. + * + * On our way back, we add a cache entry for this instruction based on + * the cache entry of the succeeding instruction. + */ + if (!pt_bce_is_valid(bce)) { + /* If we exceeded the maximum number of allowed steps, we insert + * a trampoline to the next instruction. + * + * The next time we encounter the same code, we will use the + * trampoline to jump directly to where we left off this time + * and continue from there. + */ + steps -= 1; + if (!steps) + return pt_blk_add_trampoline(bcache, insn.ip - laddr, + nip - laddr, insn.mode); + + status = pt_blk_proceed_no_event_fill_cache(decoder, block, + bcache, section, + laddr, steps); + if (status < 0) + return status; + + /* Let's see if we have more luck this time. */ + status = pt_bcache_lookup(&bce, bcache, nip - laddr); + if (status < 0) + return status; + + /* If we still don't have a valid cache entry, we're done. Most + * likely, @block overflowed and we couldn't proceed past the + * next instruction. + */ + if (!pt_bce_is_valid(bce)) + return 0; + } + + /* We must not have switched execution modes. + * + * This would require an event and we're on the no-event flow. + */ + if (pt_bce_exec_mode(bce) != insn.mode) + return -pte_internal; + + /* The decision point IP and the displacement from @insn.ip. 
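In other words, the valid entry found at @nip is extended backwards by one instruction: the new entry at @insn.ip covers one more instruction and its displacement grows by the distance from @insn.ip to @nip, so it still points at the same decision point. A sketch of the arithmetic that follows, using plain integers instead of struct pt_bcache_entry:

#include <stdint.h>

/* Chain a block-cache entry one instruction backwards (illustration only).
 *
 * @insn_ip:   IP of the instruction just decoded (section-relative)
 * @nip:       IP of the next instruction, which already has an entry
 * @next_disp: that entry's displacement to the decision point
 *
 * Example: insn_ip=0x100, nip=0x103, next_disp=+0x20 gives +0x23; the
 * corresponding ninsn field grows by one.
 */
static int64_t chained_displacement(uint64_t insn_ip, uint64_t nip,
                                    int32_t next_disp)
{
        uint64_t dip = nip + (uint64_t) (int64_t) next_disp; /* decision point */

        return (int64_t) (dip - insn_ip);
}

If either the instruction count or the chained displacement overflows its field, the code below inserts a ptbq_again trampoline instead, exactly as described for pt_blk_add_trampoline().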
*/ + dip = nip + bce.displacement; + disp = (int64_t) (dip - insn.ip); + + /* We must not have switched sections between @nip and @dip since the + * cache entry at @nip brought us to @dip. + */ + if (!pt_blk_is_in_section(dip, section, laddr)) + return -pte_internal; + + /* Let's try to reach @nip's decision point from @insn.ip. + * + * There are two fields that may overflow: @bce.ninsn and + * @bce.displacement. + */ + bce.ninsn += 1; + bce.displacement = (int32_t) disp; + + /* If none of them overflowed, we're done. + * + * If one or both overflowed, let's try to insert a trampoline, i.e. we + * try to reach @dip via a ptbq_again entry to @nip. + */ + if (!bce.ninsn || ((int64_t) bce.displacement != disp)) + return pt_blk_add_trampoline(bcache, insn.ip - laddr, + nip - laddr, insn.mode); + + /* We're done. Add the cache entry. + * + * There's a chance that other decoders updated the cache entry in the + * meantime. They should have come to the same conclusion as we, + * though, and the cache entries should be identical. + * + * Cache updates are atomic so even if the two versions were not + * identical, we wouldn't care because they are both correct. + */ + return pt_bcache_add(bcache, insn.ip - laddr, bce); +} + +/* Proceed at a potentially truncated instruction. + * + * We were not able to decode the instruction at @decoder->ip in @decoder's + * cached section. This is typically caused by not having enough bytes. + * + * Try to decode the instruction again using the entire image. If this succeeds + * we expect to end up with an instruction that was truncated in the section it + * started. We provide the full instruction in this case and end the block. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_truncated(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + if (!decoder || !block) + return -pte_internal; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + insn.mode = decoder->mode; + insn.ip = decoder->ip; + + errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid); + if (errcode < 0) + return errcode; + + /* We shouldn't use this function if the instruction isn't truncated. */ + if (!insn.truncated) + return -pte_internal; + + /* Provide the instruction in the block. This ends the block. */ + memcpy(block->raw, insn.raw, insn.size); + block->iclass = insn.iclass; + block->size = insn.size; + block->truncated = 1; + + /* Log calls' return addresses for return compression. */ + errcode = pt_blk_log_call(decoder, &insn, &iext); + if (errcode < 0) + return errcode; + + /* Let's see if we can proceed to the next IP without trace. + * + * The truncated instruction ends the block but we still need to get the + * next block's start IP. + */ + errcode = pt_insn_next_ip(&decoder->ip, &insn, &iext); + if (errcode < 0) { + if (errcode != -pte_bad_query) + return errcode; + + return pt_blk_proceed_with_trace(decoder, &insn, &iext); + } + + return 0; +} + +/* Proceed to the next decision point using the block cache. + * + * Tracing is enabled and we don't have an event pending. We already set + * @block's isid. All reads are done within @section as we're not switching + * sections between blocks. + * + * Proceed as far as we get without trace. 
Stop when we either: + * + * - need trace in order to continue + * - overflow the max number of instructions in a block + * + * We actually proceed one instruction further to get the start IP for the next + * block. This only updates @decoder's internal state, though. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_no_event_cached(struct pt_block_decoder *decoder, + struct pt_block *block, + struct pt_block_cache *bcache, + struct pt_section *section, + uint64_t laddr) +{ + struct pt_bcache_entry bce; + uint16_t binsn, ninsn; + int status; + + if (!decoder || !block) + return -pte_internal; + + status = pt_bcache_lookup(&bce, bcache, decoder->ip - laddr); + if (status < 0) + return status; + + /* If we don't find a valid cache entry, fill the cache. */ + if (!pt_bce_is_valid(bce)) + return pt_blk_proceed_no_event_fill_cache(decoder, block, + bcache, section, + laddr, + bcache_fill_steps); + + /* We have a valid cache entry. Let's first check if the way to the + * decision point still fits into @block. + * + * If it doesn't, we end the block without filling it as much as we + * could since this would require us to switch to the slow path. + * + * On the next iteration, we will start with an empty block, which is + * guaranteed to have enough room for at least one block cache entry. + */ + binsn = block->ninsn; + ninsn = binsn + (uint16_t) bce.ninsn; + if (ninsn < binsn) + return 0; + + /* Jump ahead to the decision point and proceed from there. + * + * We're not switching execution modes so even if @block already has an + * execution mode, it will be the one we're going to set. + */ + decoder->ip += bce.displacement; + + /* We don't know the instruction class so we should be setting it to + * ptic_error. Since we will be able to fill it back in later in most + * cases, we move the clearing to the switch cases that don't. + */ + block->end_ip = decoder->ip; + block->ninsn = ninsn; + block->mode = pt_bce_exec_mode(bce); + + + switch (pt_bce_qualifier(bce)) { + case ptbq_again: + /* We're not able to reach the actual decision point due to + * overflows so we inserted a trampoline. + * + * We don't know the instruction and it is not guaranteed that + * we will proceed further (e.g. if @block overflowed). Let's + * clear any previously stored instruction class which has + * become invalid when we updated @block->ninsn. + */ + block->iclass = ptic_error; + + return pt_blk_proceed_no_event_cached(decoder, block, bcache, + section, laddr); + + case ptbq_cond: + /* We're at a conditional branch. */ + block->iclass = ptic_cond_jump; + + /* Let's first check whether we know the size of the + * instruction. If we do, we might get away without decoding + * the instruction. + * + * If we don't know the size we might as well do the full decode + * and proceed-with-trace flow we do for ptbq_decode. + */ + if (bce.isize) { + uint64_t ip; + int taken; + + /* If the branch is not taken, we don't need to decode + * the instruction at @decoder->ip. + * + * If it is taken, we have to implement everything here. + * We can't use the normal decode and proceed-with-trace + * flow since we already consumed the TNT bit. + */ + status = pt_qry_cond_branch(&decoder->query, &taken); + if (status < 0) + return status; + + /* Preserve the query decoder's response which indicates + * upcoming events. 
+ */ + decoder->status = status; + + ip = decoder->ip; + if (taken) { + struct pt_insn_ext iext; + struct pt_insn insn; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + insn.mode = pt_bce_exec_mode(bce); + insn.ip = ip; + + status = pt_blk_decode_in_section(&insn, &iext, + section, + laddr); + if (status < 0) + return status; + + ip += iext.variant.branch.displacement; + } + + decoder->ip = ip + bce.isize; + break; + } + + /* Fall through to ptbq_decode. */ + + case ptbq_decode: { + struct pt_insn_ext iext; + struct pt_insn insn; + + /* We need to decode the instruction at @decoder->ip and decide + * what to do based on that. + * + * We already accounted for the instruction so we can't just + * call pt_blk_proceed_one_insn(). + */ + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + insn.mode = pt_bce_exec_mode(bce); + insn.ip = decoder->ip; + + status = pt_blk_decode_in_section(&insn, &iext, section, laddr); + if (status < 0) { + if (status != -pte_bad_insn) + return status; + + return pt_blk_proceed_truncated(decoder, block); + } + + /* We just decoded @insn so we know the instruction class. */ + block->iclass = insn.iclass; + + /* Log calls' return addresses for return compression. */ + status = pt_blk_log_call(decoder, &insn, &iext); + if (status < 0) + return status; + + /* Let's see if we can proceed to the next IP without trace. + * + * Note that we also stop due to displacement overflows or to + * maintain the return-address stack for near direct calls. + */ + status = pt_insn_next_ip(&decoder->ip, &insn, &iext); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + /* We can't, so let's proceed with trace, which + * completes the block. + */ + return pt_blk_proceed_with_trace(decoder, &insn, &iext); + } + + /* End the block if the user asked us to. + * + * We only need to take care about direct near calls. Indirect + * and far calls require trace and will naturally end a block. + */ + if (decoder->flags.variant.block.end_on_call && + (insn.iclass == ptic_call)) + break; + + /* If we can proceed without trace and we stay in @section we + * may proceed further. + * + * We're done if we switch sections, though. + */ + if (!pt_blk_is_in_section(decoder->ip, section, laddr)) + break; + + return pt_blk_proceed_no_event_cached(decoder, block, bcache, + section, laddr); + } + + case ptbq_ind_call: { + uint64_t ip; + + /* We're at a near indirect call. */ + block->iclass = ptic_call; + + /* We need to update the return-address stack and query the + * destination IP. + */ + ip = decoder->ip; + + /* If we already know the size of the instruction, we don't need + * to re-decode it. + */ + if (bce.isize) + ip += bce.isize; + else { + struct pt_insn_ext iext; + struct pt_insn insn; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + insn.mode = pt_bce_exec_mode(bce); + insn.ip = ip; + + status = pt_blk_decode_in_section(&insn, &iext, section, + laddr); + if (status < 0) + return status; + + ip += insn.size; + } + + status = pt_retstack_push(&decoder->retstack, ip); + if (status < 0) + return status; + + status = pt_qry_indirect_branch(&decoder->query, &decoder->ip); + if (status < 0) + return status; + + /* Preserve the query decoder's response which indicates + * upcoming events. + */ + decoder->status = status; + break; + } + + case ptbq_return: { + int taken; + + /* We're at a near return. */ + block->iclass = ptic_return; + + /* Check for a compressed return. 
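For background: a compressed return shows up in the trace as a single "taken" conditional-branch bit rather than a full target IP, so the decoder must supply the target itself from the return addresses it recorded at calls (pt_retstack_push() in the ptbq_ind_call case above, pt_retstack_pop() below). The concept, reduced to a toy shadow stack; this is not the pt_retstack implementation, just an illustration:

#include <stdint.h>

/* Toy shadow return stack. */
struct toy_retstack {
        uint64_t entry[64];
        unsigned int top;
};

/* At a near call: remember the IP of the instruction after the call. */
static int toy_ret_push(struct toy_retstack *rs, uint64_t return_ip)
{
        if (rs->top >= 64)
                return -1;      /* the real pt_retstack bounds this differently */

        rs->entry[rs->top++] = return_ip;
        return 0;
}

/* At a compressed return: the target is not in the trace; take it from
 * the shadow stack.
 */
static int toy_ret_pop(struct toy_retstack *rs, uint64_t *return_ip)
{
        if (!rs->top)
                return -1;

        *return_ip = rs->entry[--rs->top];
        return 0;
}

Popping an empty stack corresponds to libipt's pte_retstack_empty error, "compressed return without call", listed in pt_errstr() later in this patch.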
*/ + status = pt_qry_cond_branch(&decoder->query, &taken); + if (status < 0) { + if (status != -pte_bad_query) + return status; + + /* The return is not compressed. We need another query + * to determine the destination IP. + */ + status = pt_qry_indirect_branch(&decoder->query, + &decoder->ip); + if (status < 0) + return status; + + /* Preserve the query decoder's response which indicates + * upcoming events. + */ + decoder->status = status; + break; + } + + /* Preserve the query decoder's response which indicates + * upcoming events. + */ + decoder->status = status; + + /* A compressed return is indicated by a taken conditional + * branch. + */ + if (!taken) + return -pte_bad_retcomp; + + return pt_retstack_pop(&decoder->retstack, &decoder->ip); + } + + case ptbq_indirect: + /* We're at an indirect jump or far transfer. + * + * We don't know the exact instruction class and there's no + * reason to decode the instruction for any other purpose. + * + * Indicate that we don't know the instruction class and leave + * it to our caller to decode the instruction if needed. + */ + block->iclass = ptic_error; + + /* This is neither a near call nor return so we don't need to + * touch the return-address stack. + * + * Just query the destination IP. + */ + status = pt_qry_indirect_branch(&decoder->query, &decoder->ip); + if (status < 0) + return status; + + /* Preserve the query decoder's response which indicates + * upcoming events. + */ + decoder->status = status; + break; + } + + return 0; +} + +/* Proceed to the next decision point - try using the cache. + * + * Tracing is enabled and we don't have an event pending. Proceed as far as + * we get without trace. Stop when we either: + * + * - need trace in order to continue + * - overflow the max number of instructions in a block + * + * We actually proceed one instruction further to get the start IP for the next + * block. This only updates @decoder's internal state, though. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_no_event_trycache(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + struct pt_block_cache *bcache; + struct pt_section *section; + uint64_t laddr; + int isid, errcode; + + if (!decoder || !block) + return -pte_internal; + + isid = pt_blk_cached_section(&decoder->scache, §ion, &laddr, + decoder->image, &decoder->asid, + decoder->ip); + if (isid < 0) { + if (isid != -pte_nomap) + return isid; + + errcode = pt_blk_scache_invalidate(&decoder->scache); + if (errcode < 0) + return errcode; + + isid = pt_image_find(decoder->image, §ion, &laddr, + &decoder->asid, decoder->ip); + if (isid < 0) { + if (isid != -pte_nomap) + return isid; + + /* Even if there is no such section in the image, we may + * still read the memory via the callback function. + */ + return pt_blk_proceed_no_event_uncached(decoder, block); + } + + errcode = pt_section_map(section); + if (errcode < 0) + goto out_put; + + errcode = pt_blk_cache_section(&decoder->scache, section, laddr, + isid); + if (errcode < 0) + goto out_unmap; + } + + /* We do not switch sections inside a block. 
*/ + if (isid != block->isid) { + if (!pt_blk_block_is_empty(block)) + return 0; + + block->isid = isid; + } + + bcache = pt_section_bcache(section); + if (!bcache) + return pt_blk_proceed_no_event_uncached(decoder, block); + + return pt_blk_proceed_no_event_cached(decoder, block, bcache, section, + laddr); + +out_unmap: + (void) pt_section_unmap(section); + +out_put: + (void) pt_section_put(section); + return errcode; +} + +/* Proceed to the next decision point. + * + * We don't have an event pending. Ensure that tracing is enabled and proceed + * as far as we get. Try using the cache, if possible. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed_no_event(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + /* The end of the trace ends a non-empty block. + * + * If we're called again, we will proceed until we really need trace. + * For example, if tracing is currently disabled. + */ + if (decoder->status & pts_eos) { + if (!pt_blk_block_is_empty(block)) + return 0; + + if (!decoder->enabled) + return -pte_eos; + } + + /* If tracing is disabled and we have still trace left but no event, + * something is wrong. + */ + if (!decoder->enabled) + return -pte_no_enable; + + return pt_blk_proceed_no_event_trycache(decoder, block); +} + +/* Proceed to the next event or decision point. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_blk_proceed(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + int event_pending; + + event_pending = pt_blk_fetch_event(decoder); + if (event_pending != 0) { + if (event_pending < 0) + return event_pending; + + return pt_blk_proceed_event(decoder, block); + } + + return pt_blk_proceed_no_event(decoder, block); +} + +static int pt_blk_status(const struct pt_block_decoder *decoder) +{ + int status, flags; + + if (!decoder) + return -pte_internal; + + status = decoder->status; + flags = 0; + + /* Forward end-of-trace indications. + * + * Postpone it as long as we're still processing events, though. + */ + if ((status & pts_eos) && !decoder->process_event) + flags |= pts_eos; + + return flags; +} + +enum { + /* The maximum number of steps to take when determining whether the + * event location can be reached. + */ + bdm64_max_steps = 0x100 +}; + +/* Try to work around erratum BDM64. + * + * If we got a transaction abort immediately following a branch that produced + * trace, the trace for that branch might have been corrupted. + * + * Returns a positive integer if the erratum was handled. + * Returns zero if the erratum does not seem to apply. + * Returns a negative error code otherwise. + */ +static int pt_blk_handle_erratum_bdm64(struct pt_block_decoder *decoder, + const struct pt_block *block, + const struct pt_event *ev) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int status; + + if (!decoder || !block || !ev) + return -pte_internal; + + /* This only affects aborts. */ + if (!ev->variant.tsx.aborted) + return 0; + + /* This only affects branches that require trace. + * + * If the erratum hits, that branch ended the current block and brought + * us to the trailing event flow. + */ + if (pt_blk_block_is_empty(block)) + return 0; + + insn.mode = block->mode; + insn.ip = block->end_ip; + + status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid); + if (status < 0) + return 0; + + if (!pt_insn_is_branch(&insn, &iext)) + return 0; + + /* Let's check if we can reach the event location from here. 
+ * + * If we can, let's assume the erratum did not hit. We might still be + * wrong but we're not able to tell. + */ + status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip, + decoder->mode, decoder->image, + &decoder->asid, bdm64_max_steps); + if (status > 0) + return 0; + + /* We can't reach the event location. This could either mean that we + * stopped too early (and status is zero) or that the erratum hit. + * + * We assume the latter and pretend that the previous branch brought us + * to the event location, instead. + */ + decoder->ip = ev->variant.tsx.ip; + + return 1; +} + +/* Handle a trailing TSX event. + * + * This involves handling erratum BDM64. + * + * Returns a positive integer if the event is to be postponed. + * Returns zero if the event was handled successfully. + * Returns a negative error code otherwise. + */ +static inline int pt_blk_handle_trailing_tsx(struct pt_block_decoder *decoder, + struct pt_block *block, + const struct pt_event *ev) +{ + if (!decoder || !ev) + return -pte_internal; + + if (!ev->ip_suppressed) { + if (decoder->query.config.errata.bdm64) { + int status; + + status = pt_blk_handle_erratum_bdm64(decoder, block, + ev); + if (status < 0) + return 1; + } + + if (decoder->ip != ev->variant.tsx.ip) + return 1; + } + + return pt_blk_apply_tsx(decoder, block, ev); +} + +/* Process events that bind to the current decoder IP. + * + * We filled a block and proceeded to the next IP, which will become the start + * IP of the next block. Process any pending events that bind to that IP so we + * can indicate their effect in the current block. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + */ +static int pt_blk_process_trailing_events(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + if (!decoder) + return -pte_internal; + + for (;;) { + struct pt_event *ev; + int status; + + status = pt_blk_fetch_event(decoder); + if (status <= 0) { + if (status < 0) + return status; + + break; + } + + ev = &decoder->event; + switch (ev->type) { + case ptev_enabled: + case ptev_disabled: + case ptev_paging: + case ptev_vmcs: + case ptev_overflow: + break; + + case ptev_async_disabled: + if (decoder->ip != ev->variant.async_disabled.at) + break; + + if (decoder->query.config.errata.skd022) { + status = pt_blk_handle_erratum_skd022(decoder, + ev); + if (status != 0) { + if (status < 0) + break; + + continue; + } + } + + + status = pt_blk_apply_disabled(decoder, block, ev); + if (status < 0) + return status; + + continue; + + case ptev_async_branch: + if (decoder->ip != ev->variant.async_branch.from) + break; + + status = pt_blk_apply_async_branch(decoder, block, ev); + if (status < 0) + return status; + + continue; + + case ptev_async_paging: + if (!ev->ip_suppressed && + decoder->ip != ev->variant.async_paging.ip) + break; + + status = pt_blk_apply_paging(decoder, block, ev); + if (status < 0) + return status; + + continue; + + case ptev_async_vmcs: + if (!ev->ip_suppressed && + decoder->ip != ev->variant.async_vmcs.ip) + break; + + status = pt_blk_apply_vmcs(decoder, block, ev); + if (status < 0) + return status; + + continue; + + case ptev_exec_mode: + if (!ev->ip_suppressed && + decoder->ip != ev->variant.exec_mode.ip) + break; + + status = pt_blk_apply_exec_mode(decoder, block, ev); + if (status < 0) + return status; + + continue; + + case ptev_tsx: + status = pt_blk_handle_trailing_tsx(decoder, block, ev); + if (status < 0) + return status; + + if (status > 0) + break; + + continue; + 
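Once trailing events have been processed, the status flags assembled by pt_blk_status() are what a caller of pt_blk_next() (defined a little further below) finally sees. A minimal consumer loop might look as follows; pt_blk_alloc_decoder(), pt_blk_sync_forward() and pt_blk_free_decoder() are part of the public block-decoder API and are assumed here rather than shown in this hunk, and re-synchronization after decode errors is omitted:

#include <inttypes.h>
#include <stdio.h>

#include "intel-pt.h"

/* Decode a configured trace buffer block by block (sketch). */
static int dump_blocks(const struct pt_config *config)
{
        struct pt_block_decoder *decoder;
        int status;

        decoder = pt_blk_alloc_decoder(config);
        if (!decoder)
                return -pte_nomem;

        status = pt_blk_sync_forward(decoder);
        while (status >= 0) {
                struct pt_block block;

                status = pt_blk_next(decoder, &block, sizeof(block));
                if (status < 0)
                        break;

                printf("[%u insn] 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
                       (unsigned int) block.ninsn, block.ip, block.end_ip);

                if (status & pts_eos)
                        break;
        }

        if (status < 0 && status != -pte_eos)
                fprintf(stderr, "decode error: %s\n",
                        pt_errstr(pt_errcode(status)));

        pt_blk_free_decoder(decoder);
        return status;
}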
+ case ptev_stop: + status = pt_blk_apply_stop(decoder, block, ev); + if (status < 0) + return status; + + continue; + } + + /* If we fall out of the switch, we're done. */ + break; + } + + return pt_blk_status(decoder); +} + +/* Collect one block. + * + * Fill a new, empty block. + * + * Returns a non-negative pt_status_flag bit-vector on success, a negative error + * code otherwise. + */ +static int pt_blk_collect(struct pt_block_decoder *decoder, + struct pt_block *block) +{ + int errcode; + + if (!decoder || !block) + return -pte_internal; + + /* Zero-initialize the block in case of error returns. */ + memset(block, 0, sizeof(*block)); + + /* Fill in a few things from the current decode state. + * + * This reflects the state of the last pt_blk_next() or pt_blk_start() + * call. Note that, unless we stop with tracing disabled, we proceed + * already to the start IP of the next block. + * + * Some of the state may later be overwritten as we process events. + */ + block->ip = decoder->ip; + block->mode = decoder->mode; + if (decoder->speculative) + block->speculative = 1; + + /* Proceed one block. */ + errcode = pt_blk_proceed(decoder, block); + if (errcode < 0) + return errcode; + + /* We may still have events left that trigger on the current IP. + * + * This IP lies outside of @block but events typically bind to the IP of + * the last instruction that did not retire. + */ + return pt_blk_process_trailing_events(decoder, block); +} + +int pt_blk_next(struct pt_block_decoder *decoder, struct pt_block *ublock, + size_t size) +{ + struct pt_block block, *pblock; + int errcode, status; + + if (!decoder || !ublock) + return -pte_invalid; + + pblock = size == sizeof(block) ? ublock : █ + + status = pt_blk_collect(decoder, pblock); + + errcode = block_to_user(ublock, size, pblock); + if (errcode < 0) + return errcode; + + return status; +} diff --git a/libipt/src/pt_config.c b/libipt/src/pt_config.c new file mode 100644 index 0000000..e426f07 --- /dev/null +++ b/libipt/src/pt_config.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "intel-pt.h" + +#include +#include + + +int pt_cpu_errata(struct pt_errata *errata, const struct pt_cpu *cpu) +{ + if (!errata || !cpu) + return -pte_invalid; + + memset(errata, 0, sizeof(*errata)); + + /* We don't know about others. */ + if (cpu->vendor != pcv_intel) + return 0; + + switch (cpu->family) { + case 0x6: + switch (cpu->model) { + case 0x3d: + case 0x47: + case 0x4f: + case 0x56: + errata->bdm70 = 1; + errata->bdm64 = 1; + break; + + case 0x4e: + case 0x5e: + errata->bdm70 = 1; + errata->skd007 = 1; + errata->skd022 = 1; + errata->skd010 = 1; + break; + } + break; + } + + return 0; +} + +int pt_config_from_user(struct pt_config *config, + const struct pt_config *uconfig) +{ + uint8_t *begin, *end; + size_t size; + + if (!config) + return -pte_internal; + + if (!uconfig) + return -pte_invalid; + + size = uconfig->size; + if (size < offsetof(struct pt_config, decode)) + return -pte_bad_config; + + begin = uconfig->begin; + end = uconfig->end; + + if (!begin || !end || end < begin) + return -pte_bad_config; + + /* Ignore fields in the user's configuration we don't know; zero out + * fields the user didn't know about. + */ + if (sizeof(*config) <= size) + size = sizeof(*config); + else + memset(((uint8_t *) config) + size, 0, sizeof(*config) - size); + + /* Copy (portions of) the user's configuration. */ + memcpy(config, uconfig, size); + + /* We copied user's size - fix it. */ + config->size = size; + + return 0; +} diff --git a/libipt/src/pt_cpu.c b/libipt/src/pt_cpu.c new file mode 100644 index 0000000..5c1d66b --- /dev/null +++ b/libipt/src/pt_cpu.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_cpuid.h" + +#include "intel-pt.h" + +#include +#include + + +const char *cpu_vendors[] = { + "", + "GenuineIntel" +}; + +enum { + pt_cpuid_vendor_size = 12 +}; + +union cpu_vendor { + /* The raw data returned from cpuid. */ + struct { + uint32_t ebx; + uint32_t edx; + uint32_t ecx; + } cpuid; + + /* The resulting vendor string. */ + char vendor_string[pt_cpuid_vendor_size]; +}; + +static enum pt_cpu_vendor cpu_vendor(void) +{ + union cpu_vendor vendor; + uint32_t eax; + size_t i; + + memset(&vendor, 0, sizeof(vendor)); + eax = 0; + + pt_cpuid(0u, &eax, &vendor.cpuid.ebx, &vendor.cpuid.ecx, + &vendor.cpuid.edx); + + for (i = 0; i < sizeof(cpu_vendors)/sizeof(*cpu_vendors); i++) + if (strncmp(vendor.vendor_string, + cpu_vendors[i], pt_cpuid_vendor_size) == 0) + return (enum pt_cpu_vendor) i; + + return pcv_unknown; +} + +static uint32_t cpu_info(void) +{ + uint32_t eax, ebx, ecx, edx; + + eax = 0; + ebx = 0; + ecx = 0; + edx = 0; + pt_cpuid(1u, &eax, &ebx, &ecx, &edx); + + return eax; +} + +int pt_cpu_parse(struct pt_cpu *cpu, const char *s) +{ + const char sep = '/'; + char *endptr; + long family, model, stepping; + + if (!cpu || !s) + return -pte_invalid; + + family = strtol(s, &endptr, 0); + if (s == endptr || *endptr == '\0' || *endptr != sep) + return -pte_invalid; + + if (family < 0 || family > USHRT_MAX) + return -pte_invalid; + + /* skip separator */ + s = endptr + 1; + + model = strtol(s, &endptr, 0); + if (s == endptr || (*endptr != '\0' && *endptr != sep)) + return -pte_invalid; + + if (model < 0 || model > UCHAR_MAX) + return -pte_invalid; + + if (*endptr == '\0') + /* stepping was omitted, it defaults to 0 */ + stepping = 0; + else { + /* skip separator */ + s = endptr + 1; + + stepping = strtol(s, &endptr, 0); + if (*endptr != '\0') + return -pte_invalid; + + if (stepping < 0 || stepping > UCHAR_MAX) + return -pte_invalid; + } + + cpu->vendor = pcv_intel; + cpu->family = (uint16_t) family; + cpu->model = (uint8_t) model; + cpu->stepping = (uint8_t) stepping; + + return 0; +} + +int pt_cpu_read(struct pt_cpu *cpu) +{ + uint32_t info; + uint16_t family; + + if (!cpu) + return -pte_invalid; + + cpu->vendor = cpu_vendor(); + + info = cpu_info(); + + cpu->family = family = (info>>8) & 0xf; + if (family == 0xf) + cpu->family += (info>>20) & 0xf; + + cpu->model = (info>>4) & 0xf; + if (family == 0x6 || family == 0xf) + cpu->model += (info>>12) & 0xf0; + + cpu->stepping = (info>>0) & 0xf; + + return 0; +} diff --git a/libipt/src/pt_decoder_function.c b/libipt/src/pt_decoder_function.c new file mode 100644 index 0000000..e997744 --- /dev/null +++ b/libipt/src/pt_decoder_function.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following 
disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_decoder_function.h" +#include "pt_packet_decoder.h" +#include "pt_query_decoder.h" + +#include "intel-pt.h" + + +const struct pt_decoder_function pt_decode_unknown = { + /* .packet = */ pt_pkt_decode_unknown, + /* .decode = */ pt_qry_decode_unknown, + /* .header = */ pt_qry_decode_unknown, + /* .flags = */ pdff_unknown +}; + +const struct pt_decoder_function pt_decode_pad = { + /* .packet = */ pt_pkt_decode_pad, + /* .decode = */ pt_qry_decode_pad, + /* .header = */ pt_qry_decode_pad, + /* .flags = */ pdff_pad +}; + +const struct pt_decoder_function pt_decode_psb = { + /* .packet = */ pt_pkt_decode_psb, + /* .decode = */ pt_qry_decode_psb, + /* .header = */ NULL, + /* .flags = */ 0 +}; + +const struct pt_decoder_function pt_decode_tip = { + /* .packet = */ pt_pkt_decode_tip, + /* .decode = */ pt_qry_decode_tip, + /* .header = */ NULL, + /* .flags = */ pdff_tip +}; + +const struct pt_decoder_function pt_decode_tnt_8 = { + /* .packet = */ pt_pkt_decode_tnt_8, + /* .decode = */ pt_qry_decode_tnt_8, + /* .header = */ NULL, + /* .flags = */ pdff_tnt +}; + +const struct pt_decoder_function pt_decode_tnt_64 = { + /* .packet = */ pt_pkt_decode_tnt_64, + /* .decode = */ pt_qry_decode_tnt_64, + /* .header = */ NULL, + /* .flags = */ pdff_tnt +}; + +const struct pt_decoder_function pt_decode_tip_pge = { + /* .packet = */ pt_pkt_decode_tip_pge, + /* .decode = */ pt_qry_decode_tip_pge, + /* .header = */ NULL, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_tip_pgd = { + /* .packet = */ pt_pkt_decode_tip_pgd, + /* .decode = */ pt_qry_decode_tip_pgd, + /* .header = */ NULL, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_fup = { + /* .packet = */ pt_pkt_decode_fup, + /* .decode = */ pt_qry_decode_fup, + /* .header = */ pt_qry_header_fup, + /* .flags = */ pdff_fup +}; + +const struct pt_decoder_function pt_decode_pip = { + /* .packet = */ pt_pkt_decode_pip, + /* .decode = */ pt_qry_decode_pip, + /* .header = */ pt_qry_header_pip, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_ovf = { + /* .packet = */ pt_pkt_decode_ovf, + /* .decode = */ pt_qry_decode_ovf, + /* .header = */ NULL, + /* .flags = */ pdff_psbend | pdff_event +}; + +const struct pt_decoder_function pt_decode_mode = { + /* 
.packet = */ pt_pkt_decode_mode, + /* .decode = */ pt_qry_decode_mode, + /* .header = */ pt_qry_header_mode, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_psbend = { + /* .packet = */ pt_pkt_decode_psbend, + /* .decode = */ pt_qry_decode_psbend, + /* .header = */ NULL, + /* .flags = */ pdff_psbend +}; + +const struct pt_decoder_function pt_decode_tsc = { + /* .packet = */ pt_pkt_decode_tsc, + /* .decode = */ pt_qry_decode_tsc, + /* .header = */ pt_qry_header_tsc, + /* .flags = */ pdff_timing +}; + +const struct pt_decoder_function pt_decode_cbr = { + /* .packet = */ pt_pkt_decode_cbr, + /* .decode = */ pt_qry_decode_cbr, + /* .header = */ pt_qry_header_cbr, + /* .flags = */ pdff_timing +}; + +const struct pt_decoder_function pt_decode_tma = { + /* .packet = */ pt_pkt_decode_tma, + /* .decode = */ pt_qry_decode_tma, + /* .header = */ pt_qry_decode_tma, + /* .flags = */ pdff_timing +}; + +const struct pt_decoder_function pt_decode_mtc = { + /* .packet = */ pt_pkt_decode_mtc, + /* .decode = */ pt_qry_decode_mtc, + /* .header = */ pt_qry_decode_mtc, + /* .flags = */ pdff_timing +}; + +const struct pt_decoder_function pt_decode_cyc = { + /* .packet = */ pt_pkt_decode_cyc, + /* .decode = */ pt_qry_decode_cyc, + /* .header = */ pt_qry_decode_cyc, + /* .flags = */ pdff_timing +}; + +const struct pt_decoder_function pt_decode_stop = { + /* .packet = */ pt_pkt_decode_stop, + /* .decode = */ pt_qry_decode_stop, + /* .header = */ NULL, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_vmcs = { + /* .packet = */ pt_pkt_decode_vmcs, + /* .decode = */ pt_qry_decode_vmcs, + /* .header = */ pt_qry_header_vmcs, + /* .flags = */ pdff_event +}; + +const struct pt_decoder_function pt_decode_mnt = { + /* .packet = */ pt_pkt_decode_mnt, + /* .decode = */ pt_qry_decode_mnt, + /* .header = */ pt_qry_decode_mnt, + /* .flags = */ pdff_pad +}; + + +int pt_df_fetch(const struct pt_decoder_function **dfun, const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + uint8_t opc, ext, ext2; + + if (!dfun || !config) + return -pte_internal; + + /* Clear the decode function in case of errors. */ + *dfun = NULL; + + begin = config->begin; + end = config->end; + + if (!pos || (pos < begin) || (end < pos)) + return -pte_nosync; + + if (pos == end) + return -pte_eos; + + opc = *pos++; + switch (opc) { + default: + /* Check opcodes that require masking. 
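Masking is needed because short TNT, CYC, and the IP packets keep payload bits in the opcode byte itself, so only a few low bits identify the packet type. As a worked example, using the pt_opc_/pt_opm_ constants referenced here (assumed to come from the internal pt_opcodes.h header) and the short-TNT layout implied by the encoder later in this patch, i.e. marker bits at the bottom with TNT bits and a stop bit above:

#include <stdint.h>

#include "pt_opcodes.h" /* internal header defining the pt_opc_ and pt_opm_ constants */

/* Build a short TNT byte for @payload with @bit_size TNT bits, mirroring
 * the ppt_tnt_8 branch of pt_enc_next() below, and return the result of
 * the classification test used in the default: branch above.
 */
static int tnt8_roundtrip(uint8_t payload, uint8_t bit_size)
{
        uint8_t byte;

        byte = (uint8_t) (payload << pt_opm_tnt_8_shr);
        byte |= (uint8_t) (1u << (bit_size + pt_opm_tnt_8_shr));

        return (byte & pt_opm_tnt_8) == pt_opc_tnt_8;
}

A plain switch on the whole byte could not express this, which is why these checks sit in the default: branch rather than in dedicated case labels.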
*/ + if ((opc & pt_opm_tnt_8) == pt_opc_tnt_8) { + *dfun = &pt_decode_tnt_8; + return 0; + } + + if ((opc & pt_opm_cyc) == pt_opc_cyc) { + *dfun = &pt_decode_cyc; + return 0; + } + + if ((opc & pt_opm_tip) == pt_opc_tip) { + *dfun = &pt_decode_tip; + return 0; + } + + if ((opc & pt_opm_fup) == pt_opc_fup) { + *dfun = &pt_decode_fup; + return 0; + } + + if ((opc & pt_opm_tip) == pt_opc_tip_pge) { + *dfun = &pt_decode_tip_pge; + return 0; + } + + if ((opc & pt_opm_tip) == pt_opc_tip_pgd) { + *dfun = &pt_decode_tip_pgd; + return 0; + } + + *dfun = &pt_decode_unknown; + return 0; + + case pt_opc_pad: + *dfun = &pt_decode_pad; + return 0; + + case pt_opc_mode: + *dfun = &pt_decode_mode; + return 0; + + case pt_opc_tsc: + *dfun = &pt_decode_tsc; + return 0; + + case pt_opc_mtc: + *dfun = &pt_decode_mtc; + return 0; + + case pt_opc_ext: + if (pos == end) + return -pte_eos; + + ext = *pos++; + switch (ext) { + default: + *dfun = &pt_decode_unknown; + return 0; + + case pt_ext_psb: + *dfun = &pt_decode_psb; + return 0; + + case pt_ext_ovf: + *dfun = &pt_decode_ovf; + return 0; + + case pt_ext_tnt_64: + *dfun = &pt_decode_tnt_64; + return 0; + + case pt_ext_psbend: + *dfun = &pt_decode_psbend; + return 0; + + case pt_ext_cbr: + *dfun = &pt_decode_cbr; + return 0; + + case pt_ext_pip: + *dfun = &pt_decode_pip; + return 0; + + case pt_ext_tma: + *dfun = &pt_decode_tma; + return 0; + + case pt_ext_stop: + *dfun = &pt_decode_stop; + return 0; + + case pt_ext_vmcs: + *dfun = &pt_decode_vmcs; + return 0; + + case pt_ext_ext2: + if (pos == end) + return -pte_eos; + + ext2 = *pos++; + switch (ext2) { + default: + *dfun = &pt_decode_unknown; + return 0; + + case pt_ext2_mnt: + *dfun = &pt_decode_mnt; + return 0; + } + } + } +} diff --git a/libipt/src/pt_encoder.c b/libipt/src/pt_encoder.c new file mode 100644 index 0000000..ad2ecad --- /dev/null +++ b/libipt/src/pt_encoder.c @@ -0,0 +1,799 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_encoder.h" +#include "pt_config.h" + +#include +#include + + +int pt_encoder_init(struct pt_encoder *encoder, const struct pt_config *config) +{ + int errcode; + + if (!encoder) + return -pte_invalid; + + memset(encoder, 0, sizeof(*encoder)); + + errcode = pt_config_from_user(&encoder->config, config); + if (errcode < 0) + return errcode; + + encoder->pos = encoder->config.begin; + + return 0; +} + +void pt_encoder_fini(struct pt_encoder *encoder) +{ + (void) encoder; + + /* Nothing to do. */ +} + +struct pt_encoder *pt_alloc_encoder(const struct pt_config *config) +{ + struct pt_encoder *encoder; + int errcode; + + encoder = malloc(sizeof(*encoder)); + if (!encoder) + return NULL; + + errcode = pt_encoder_init(encoder, config); + if (errcode < 0) { + free(encoder); + return NULL; + } + + return encoder; +} + +void pt_free_encoder(struct pt_encoder *encoder) +{ + pt_encoder_fini(encoder); + free(encoder); +} + +int pt_enc_sync_set(struct pt_encoder *encoder, uint64_t offset) +{ + uint8_t *begin, *end, *pos; + + if (!encoder) + return -pte_invalid; + + begin = encoder->config.begin; + end = encoder->config.end; + pos = begin + offset; + + if (end < pos || pos < begin) + return -pte_eos; + + encoder->pos = pos; + return 0; +} + +int pt_enc_get_offset(struct pt_encoder *encoder, uint64_t *offset) +{ + const uint8_t *raw, *begin; + + if (!encoder || !offset) + return -pte_invalid; + + /* The encoder is synchronized at all times. */ + raw = encoder->pos; + if (!raw) + return -pte_internal; + + begin = encoder->config.begin; + if (!begin) + return -pte_internal; + + *offset = raw - begin; + return 0; +} + +const struct pt_config *pt_enc_get_config(const struct pt_encoder *encoder) +{ + if (!encoder) + return NULL; + + return &encoder->config; +} + +/* Check the remaining space. + * + * Returns zero if there are at least \@size bytes of free space available in + * \@encoder's Intel PT buffer. + * + * Returns -pte_eos if not enough space is available. + * Returns -pte_internal if \@encoder is NULL. + * Returns -pte_internal if \@encoder is not synchronized. + */ +static int pt_reserve(const struct pt_encoder *encoder, unsigned int size) +{ + const uint8_t *begin, *end, *pos; + + if (!encoder) + return -pte_internal; + + /* The encoder is synchronized at all times. */ + pos = encoder->pos; + if (!pos) + return -pte_internal; + + begin = encoder->config.begin; + end = encoder->config.end; + + pos += size; + if (pos < begin || end < pos) + return -pte_eos; + + return 0; +} + +/* Return the size of an IP payload based on its IP compression. + * + * Returns -pte_bad_packet if \@ipc is not a valid IP compression. + */ +static int pt_ipc_size(enum pt_ip_compression ipc) +{ + switch (ipc) { + case pt_ipc_suppressed: + return 0; + + case pt_ipc_update_16: + return pt_pl_ip_upd16_size; + + case pt_ipc_update_32: + return pt_pl_ip_upd32_size; + + case pt_ipc_update_48: + return pt_pl_ip_upd48_size; + + case pt_ipc_sext_48: + return pt_pl_ip_sext48_size; + + case pt_ipc_full: + return pt_pl_ip_full_size; + } + + return -pte_invalid; +} + +/* Encode an integer value. + * + * Writes the \@size least signifficant bytes of \@value starting from \@pos. + * + * The caller needs to ensure that there is enough space available. + * + * Returns the updated position. + */ +static uint8_t *pt_encode_int(uint8_t *pos, uint64_t val, int size) +{ + for (; size; --size, val >>= 8) + *pos++ = (uint8_t) val; + + return pos; +} + +/* Encode an IP packet. 
+ * + * Write an IP packet with opcode \@opc and payload from \@packet if there is + * enough space in \@encoder's Intel PT buffer. + * + * Returns the number of bytes written on success. + * + * Returns -pte_eos if there is not enough space. + * Returns -pte_internal if \@encoder or \@packet is NULL. + * Returns -pte_invalid if \@packet.ipc is not valid. + */ +static int pt_encode_ip(struct pt_encoder *encoder, enum pt_opcode op, + const struct pt_packet_ip *packet) +{ + uint8_t *pos; + uint8_t opc, ipc; + int size, errcode; + + if (!encoder || !packet) + return pte_internal; + + size = pt_ipc_size(packet->ipc); + if (size < 0) + return size; + + errcode = pt_reserve(encoder, /* opc size = */ 1 + size); + if (errcode < 0) + return errcode; + + /* We already checked the ipc in pt_ipc_size(). */ + ipc = (uint8_t) (packet->ipc << pt_opm_ipc_shr); + opc = (uint8_t) op; + + pos = encoder->pos; + *pos++ = opc | ipc; + + encoder->pos = pt_encode_int(pos, packet->ip, size); + return /* opc size = */ 1 + size; +} + +int pt_enc_next(struct pt_encoder *encoder, const struct pt_packet *packet) +{ + uint8_t *pos, *begin; + int errcode; + + if (!encoder || !packet) + return -pte_invalid; + + pos = begin = encoder->pos; + switch (packet->type) { + case ppt_pad: + errcode = pt_reserve(encoder, ptps_pad); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_pad; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_psb: { + uint64_t psb; + + errcode = pt_reserve(encoder, ptps_psb); + if (errcode < 0) + return errcode; + + psb = ((uint64_t) pt_psb_hilo << 48 | + (uint64_t) pt_psb_hilo << 32 | + (uint64_t) pt_psb_hilo << 16 | + (uint64_t) pt_psb_hilo); + + pos = pt_encode_int(pos, psb, 8); + pos = pt_encode_int(pos, psb, 8); + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_psbend: + errcode = pt_reserve(encoder, ptps_psbend); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_psbend; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_ovf: + errcode = pt_reserve(encoder, ptps_ovf); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_ovf; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_fup: + return pt_encode_ip(encoder, pt_opc_fup, &packet->payload.ip); + + case ppt_tip: + return pt_encode_ip(encoder, pt_opc_tip, &packet->payload.ip); + + case ppt_tip_pge: + return pt_encode_ip(encoder, pt_opc_tip_pge, + &packet->payload.ip); + + case ppt_tip_pgd: + return pt_encode_ip(encoder, pt_opc_tip_pgd, + &packet->payload.ip); + + case ppt_tnt_8: { + uint8_t opc, stop; + + if (packet->payload.tnt.bit_size >= 7) + return -pte_bad_packet; + + errcode = pt_reserve(encoder, ptps_tnt_8); + if (errcode < 0) + return errcode; + + stop = packet->payload.tnt.bit_size + pt_opm_tnt_8_shr; + opc = (uint8_t) packet->payload.tnt.payload << pt_opm_tnt_8_shr; + + *pos++ = opc | (1 << stop); + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_tnt_64: { + uint64_t tnt, stop; + + errcode = pt_reserve(encoder, ptps_tnt_64); + if (errcode < 0) + return errcode; + + if (packet->payload.tnt.bit_size >= pt_pl_tnt_64_bits) + return -pte_invalid; + + stop = 1ull << packet->payload.tnt.bit_size; + tnt = packet->payload.tnt.payload; + + if (tnt & ~(stop - 1)) + return -pte_invalid; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_tnt_64; + pos = pt_encode_int(pos, tnt | stop, pt_pl_tnt_64_size); + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_mode: { + uint8_t mode; + + errcode = 
pt_reserve(encoder, ptps_mode); + if (errcode < 0) + return errcode; + + switch (packet->payload.mode.leaf) { + default: + return -pte_bad_packet; + + case pt_mol_exec: + mode = pt_mol_exec; + + if (packet->payload.mode.bits.exec.csl) + mode |= pt_mob_exec_csl; + + if (packet->payload.mode.bits.exec.csd) + mode |= pt_mob_exec_csd; + break; + + case pt_mol_tsx: + mode = pt_mol_tsx; + + if (packet->payload.mode.bits.tsx.intx) + mode |= pt_mob_tsx_intx; + + if (packet->payload.mode.bits.tsx.abrt) + mode |= pt_mob_tsx_abrt; + break; + } + + *pos++ = pt_opc_mode; + *pos++ = mode; + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_pip: { + uint64_t cr3; + + errcode = pt_reserve(encoder, ptps_pip); + if (errcode < 0) + return errcode; + + cr3 = packet->payload.pip.cr3; + cr3 >>= pt_pl_pip_shl; + cr3 <<= pt_pl_pip_shr; + + if (packet->payload.pip.nr) + cr3 |= (uint64_t) pt_pl_pip_nr; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_pip; + pos = pt_encode_int(pos, cr3, pt_pl_pip_size); + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_tsc: + errcode = pt_reserve(encoder, ptps_tsc); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_tsc; + pos = pt_encode_int(pos, packet->payload.tsc.tsc, + pt_pl_tsc_size); + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_cbr: + errcode = pt_reserve(encoder, ptps_cbr); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_cbr; + *pos++ = packet->payload.cbr.ratio; + *pos++ = 0; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_tma: { + uint16_t ctc, fc; + + errcode = pt_reserve(encoder, ptps_tma); + if (errcode < 0) + return errcode; + + ctc = packet->payload.tma.ctc; + fc = packet->payload.tma.fc; + + if (fc & ~pt_pl_tma_fc_mask) + return -pte_bad_packet; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_tma; + pos = pt_encode_int(pos, ctc, pt_pl_tma_ctc_size); + *pos++ = 0; + pos = pt_encode_int(pos, fc, pt_pl_tma_fc_size); + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_mtc: + errcode = pt_reserve(encoder, ptps_mtc); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_mtc; + *pos++ = packet->payload.mtc.ctc; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_cyc: { + uint8_t byte[pt_pl_cyc_max_size], index, end; + uint64_t ctc; + + ctc = (uint8_t) packet->payload.cyc.value; + ctc <<= pt_opm_cyc_shr; + + byte[0] = pt_opc_cyc; + byte[0] |= (uint8_t) ctc; + + ctc = packet->payload.cyc.value; + ctc >>= (8 - pt_opm_cyc_shr); + if (ctc) + byte[0] |= pt_opm_cyc_ext; + + for (end = 1; ctc; ++end) { + /* Check if the CYC payload is too big. 
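The check below guards pt_pl_cyc_max_size, the largest CYC packet the format allows. More generally, the ppt_cyc branch spreads the value over extension bytes: the low bits sit in the first byte above the opcode and extension bits, and each further byte carries more bits plus a continue flag in its lowest bit. A sketch of the inverse operation, i.e. how a reader would reassemble the value, assuming exactly the layout produced here and the same pt_opcodes.h constants; the real decode lives in the packet decoder, not in this file:

#include <stddef.h>
#include <stdint.h>

#include "pt_opcodes.h" /* internal header defining the pt_opc_ and pt_opm_ constants */

/* Reassemble a CYC value from @pos (illustration only).  Returns the
 * number of bytes consumed or -1 if @size is too small.
 */
static int cyc_value(const uint8_t *pos, size_t size, uint64_t *value)
{
        uint64_t val;
        unsigned int shl, consumed;
        int ext;

        if (!size)
                return -1;

        val = pos[0] >> pt_opm_cyc_shr;
        shl = 8 - pt_opm_cyc_shr;
        ext = (pos[0] & pt_opm_cyc_ext) != 0;
        consumed = 1;

        while (ext) {
                uint8_t byte;

                if (size <= consumed)
                        return -1;

                byte = pos[consumed++];
                val |= (uint64_t) (byte >> pt_opm_cycx_shr) << shl;
                shl += 8 - pt_opm_cycx_shr;
                ext = (byte & pt_opm_cycx_ext) != 0;
        }

        *value = val;
        return (int) consumed;
}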
*/ + if (pt_pl_cyc_max_size <= end) + return -pte_bad_packet; + + ctc <<= pt_opm_cycx_shr; + + byte[end] = (uint8_t) ctc; + + ctc >>= 8; + if (ctc) + byte[end] |= pt_opm_cycx_ext; + } + + errcode = pt_reserve(encoder, end); + if (errcode < 0) + return errcode; + + for (index = 0; index < end; ++index) + *pos++ = byte[index]; + + encoder->pos = pos; + return (int) (pos - begin); + } + + case ppt_stop: + errcode = pt_reserve(encoder, ptps_stop); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_stop; + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_vmcs: + errcode = pt_reserve(encoder, ptps_vmcs); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_vmcs; + pos = pt_encode_int(pos, + packet->payload.vmcs.base >> pt_pl_vmcs_shl, + pt_pl_vmcs_size); + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_mnt: + errcode = pt_reserve(encoder, ptps_mnt); + if (errcode < 0) + return errcode; + + *pos++ = pt_opc_ext; + *pos++ = pt_ext_ext2; + *pos++ = pt_ext2_mnt; + pos = pt_encode_int(pos, packet->payload.mnt.payload, + pt_pl_mnt_size); + + encoder->pos = pos; + return (int) (pos - begin); + + case ppt_unknown: + case ppt_invalid: + return -pte_bad_opc; + } + + return -pte_bad_opc; +} + +int pt_encode_pad(struct pt_encoder *encoder) +{ + struct pt_packet packet; + + packet.type = ppt_pad; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_psb(struct pt_encoder *encoder) +{ + struct pt_packet packet; + + packet.type = ppt_psb; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_psbend(struct pt_encoder *encoder) +{ + struct pt_packet packet; + + packet.type = ppt_psbend; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tip(struct pt_encoder *encoder, uint64_t ip, + enum pt_ip_compression ipc) +{ + struct pt_packet packet; + + packet.type = ppt_tip; + packet.payload.ip.ip = ip; + packet.payload.ip.ipc = ipc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tnt_8(struct pt_encoder *encoder, uint8_t tnt, int size) +{ + struct pt_packet packet; + + packet.type = ppt_tnt_8; + packet.payload.tnt.bit_size = (uint8_t) size; + packet.payload.tnt.payload = tnt; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tnt_64(struct pt_encoder *encoder, uint64_t tnt, int size) +{ + struct pt_packet packet; + + packet.type = ppt_tnt_64; + packet.payload.tnt.bit_size = (uint8_t) size; + packet.payload.tnt.payload = tnt; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tip_pge(struct pt_encoder *encoder, uint64_t ip, + enum pt_ip_compression ipc) +{ + struct pt_packet packet; + + packet.type = ppt_tip_pge; + packet.payload.ip.ip = ip; + packet.payload.ip.ipc = ipc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tip_pgd(struct pt_encoder *encoder, uint64_t ip, + enum pt_ip_compression ipc) +{ + struct pt_packet packet; + + packet.type = ppt_tip_pgd; + packet.payload.ip.ip = ip; + packet.payload.ip.ipc = ipc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_fup(struct pt_encoder *encoder, uint64_t ip, + enum pt_ip_compression ipc) +{ + struct pt_packet packet; + + packet.type = ppt_fup; + packet.payload.ip.ip = ip; + packet.payload.ip.ipc = ipc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_pip(struct pt_encoder *encoder, uint64_t cr3, uint8_t flags) +{ + struct pt_packet packet; + + packet.type = ppt_pip; + packet.payload.pip.cr3 = cr3; + packet.payload.pip.nr = (flags & pt_pl_pip_nr) != 0; + + return pt_enc_next(encoder, 
&packet); +} + +int pt_encode_ovf(struct pt_encoder *encoder) +{ + struct pt_packet packet; + + packet.type = ppt_ovf; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_mode_exec(struct pt_encoder *encoder, enum pt_exec_mode mode) +{ + struct pt_packet packet; + + packet.type = ppt_mode; + packet.payload.mode.leaf = pt_mol_exec; + packet.payload.mode.bits.exec = pt_set_exec_mode(mode); + + return pt_enc_next(encoder, &packet); +} + + +int pt_encode_mode_tsx(struct pt_encoder *encoder, uint8_t bits) +{ + struct pt_packet packet; + + packet.type = ppt_mode; + packet.payload.mode.leaf = pt_mol_tsx; + + if (bits & pt_mob_tsx_intx) + packet.payload.mode.bits.tsx.intx = 1; + else + packet.payload.mode.bits.tsx.intx = 0; + + if (bits & pt_mob_tsx_abrt) + packet.payload.mode.bits.tsx.abrt = 1; + else + packet.payload.mode.bits.tsx.abrt = 0; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tsc(struct pt_encoder *encoder, uint64_t tsc) +{ + struct pt_packet packet; + + packet.type = ppt_tsc; + packet.payload.tsc.tsc = tsc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_cbr(struct pt_encoder *encoder, uint8_t cbr) +{ + struct pt_packet packet; + + packet.type = ppt_cbr; + packet.payload.cbr.ratio = cbr; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_tma(struct pt_encoder *encoder, uint16_t ctc, uint16_t fc) +{ + struct pt_packet packet; + + packet.type = ppt_tma; + packet.payload.tma.ctc = ctc; + packet.payload.tma.fc = fc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_mtc(struct pt_encoder *encoder, uint8_t ctc) +{ + struct pt_packet packet; + + packet.type = ppt_mtc; + packet.payload.mtc.ctc = ctc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_cyc(struct pt_encoder *encoder, uint32_t ctc) +{ + struct pt_packet packet; + + packet.type = ppt_cyc; + packet.payload.cyc.value = ctc; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_stop(struct pt_encoder *encoder) +{ + struct pt_packet packet; + + packet.type = ppt_stop; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_vmcs(struct pt_encoder *encoder, uint64_t payload) +{ + struct pt_packet packet; + + packet.type = ppt_vmcs; + packet.payload.vmcs.base = payload; + + return pt_enc_next(encoder, &packet); +} + +int pt_encode_mnt(struct pt_encoder *encoder, uint64_t payload) +{ + struct pt_packet packet; + + packet.type = ppt_mnt; + packet.payload.mnt.payload = payload; + + return pt_enc_next(encoder, &packet); +} diff --git a/libipt/src/pt_error.c b/libipt/src/pt_error.c new file mode 100644 index 0000000..fcd8073 --- /dev/null +++ b/libipt/src/pt_error.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "intel-pt.h" + + +const char *pt_errstr(enum pt_error_code errcode) +{ + switch (errcode) { + case pte_ok: + return "OK"; + + case pte_internal: + return "internal error"; + + case pte_invalid: + return "invalid argument"; + + case pte_nosync: + return "decoder out of sync"; + + case pte_bad_opc: + return "unknown opcode"; + + case pte_bad_packet: + return "unknown packet"; + + case pte_bad_context: + return "unexpected packet context"; + + case pte_eos: + return "reached end of trace stream"; + + case pte_bad_query: + return "trace stream does not match query"; + + case pte_nomem: + return "out of memory"; + + case pte_bad_config: + return "bad configuration"; + + case pte_noip: + return "no ip"; + + case pte_ip_suppressed: + return "ip has been suppressed"; + + case pte_nomap: + return "no memory mapped at this address"; + + case pte_bad_insn: + return "unknown instruction"; + + case pte_no_time: + return "no timing information"; + + case pte_no_cbr: + return "no core:bus ratio"; + + case pte_bad_image: + return "bad image"; + + case pte_bad_lock: + return "locking error"; + + case pte_not_supported: + return "not supported"; + + case pte_retstack_empty: + return "compressed return without call"; + + case pte_bad_retcomp: + return "bad compressed return"; + + case pte_bad_status_update: + return "bad status update"; + + case pte_no_enable: + return "expected tracing enabled event"; + + case pte_event_ignored: + return "event ignored"; + } + + /* Should not reach here. */ + return "internal error."; +} diff --git a/libipt/src/pt_event_queue.c b/libipt/src/pt_event_queue.c new file mode 100644 index 0000000..d3b6487 --- /dev/null +++ b/libipt/src/pt_event_queue.c @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_event_queue.h" + +#include <string.h> + + +static inline uint8_t pt_evq_inc(uint8_t idx) +{ + idx += 1; + idx %= evq_max; + + return idx; +} + +static struct pt_event *pt_event_init(struct pt_event *event) +{ + if (event) + memset(event, 0, sizeof(*event)); + + return event; +} + +void pt_evq_init(struct pt_event_queue *evq) +{ + if (!evq) + return; + + memset(evq, 0, sizeof(*evq)); +} + +struct pt_event *pt_evq_standalone(struct pt_event_queue *evq) +{ + if (!evq) + return NULL; + + return pt_event_init(&evq->standalone); +} + +struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq, + enum pt_event_binding evb) +{ + uint8_t begin, end, gap, idx; + + if (!evq) + return NULL; + + if (evb_max <= evb) + return NULL; + + begin = evq->begin[evb]; + idx = evq->end[evb]; + + if (evq_max <= begin) + return NULL; + + if (evq_max <= idx) + return NULL; + + end = pt_evq_inc(idx); + gap = pt_evq_inc(end); + + /* Leave a gap so we don't overwrite the last dequeued event. */ + if (begin == gap) + return NULL; + + evq->end[evb] = end; + + return pt_event_init(&evq->queue[evb][idx]); +} + +struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq, + enum pt_event_binding evb) +{ + uint8_t begin, end; + + if (!evq) + return NULL; + + if (evb_max <= evb) + return NULL; + + begin = evq->begin[evb]; + end = evq->end[evb]; + + if (evq_max <= begin) + return NULL; + + if (evq_max <= end) + return NULL; + + if (begin == end) + return NULL; + + evq->begin[evb] = pt_evq_inc(begin); + + return &evq->queue[evb][begin]; +} + +int pt_evq_clear(struct pt_event_queue *evq, enum pt_event_binding evb) +{ + if (!evq) + return -pte_internal; + + if (evb_max <= evb) + return -pte_internal; + + evq->begin[evb] = 0; + evq->end[evb] = 0; + + return 0; +} + +int pt_evq_empty(const struct pt_event_queue *evq, enum pt_event_binding evb) +{ + uint8_t begin, end; + + if (!evq) + return -pte_internal; + + if (evb_max <= evb) + return -pte_internal; + + begin = evq->begin[evb]; + end = evq->end[evb]; + + if (evq_max <= begin) + return -pte_internal; + + if (evq_max <= end) + return -pte_internal; + + return begin == end; +} + +int pt_evq_pending(const struct pt_event_queue *evq, enum pt_event_binding evb) +{ + int errcode; + + errcode = pt_evq_empty(evq, evb); + if (errcode < 0) + return errcode; + + return !errcode; +} + +struct pt_event *pt_evq_find(struct pt_event_queue *evq, + enum pt_event_binding evb, + enum pt_event_type evt) +{ + uint8_t begin, end; + + if (!evq) + return NULL; + + if (evb_max <= evb) + return NULL; + + begin = evq->begin[evb]; + end = evq->end[evb]; + + if (evq_max <= begin) + return NULL; + + if (evq_max <= end) + return NULL; + + for (; begin != end; begin = pt_evq_inc(begin)) { + struct pt_event *ev; + + ev = &evq->queue[evb][begin]; + if (ev->type == evt) + return ev; + } + + return NULL; +} diff --git a/libipt/src/pt_ild.c b/libipt/src/pt_ild.c new file mode 100644 index 0000000..a864dff --- /dev/null +++ b/libipt/src/pt_ild.c @@ -0,0 +1,1212 @@ +/* + * 
Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_ild.h" +#include "pti-imm-defs.h" +#include "pti-imm.h" +#include "pti-modrm-defs.h" +#include "pti-modrm.h" +#include "pti-disp-defs.h" +#include "pti-disp.h" + +#include <string.h> + +/* SET UP 3 TABLES */ + +static uint8_t has_disp_regular[4][4][8]; + +static void init_has_disp_regular_table(void) +{ + uint8_t mod, rm; + + memset(has_disp_regular, 0, sizeof(has_disp_regular)); + + /*fill eamode16 */ + has_disp_regular[ptem_16bit][0][6] = 2; + for (rm = 0; rm < 8; rm++) + for (mod = 1; mod <= 2; mod++) + has_disp_regular[ptem_16bit][mod][rm] = mod; + + /*fill eamode32/64 */ + has_disp_regular[ptem_32bit][0][5] = 4; + has_disp_regular[ptem_64bit][0][5] = 4; + for (rm = 0; rm < 8; rm++) { + has_disp_regular[ptem_32bit][1][rm] = 1; + has_disp_regular[ptem_32bit][2][rm] = 4; + + has_disp_regular[ptem_64bit][1][rm] = 1; + has_disp_regular[ptem_64bit][2][rm] = 4; + } +} + +static uint8_t eamode_table[2][4]; + +static void init_eamode_table(void) +{ + eamode_table[0][ptem_unknown] = ptem_unknown; + eamode_table[0][ptem_16bit] = ptem_16bit; + eamode_table[0][ptem_32bit] = ptem_32bit; + eamode_table[0][ptem_64bit] = ptem_64bit; + + eamode_table[1][ptem_unknown] = ptem_unknown; + eamode_table[1][ptem_16bit] = ptem_32bit; + eamode_table[1][ptem_32bit] = ptem_16bit; + eamode_table[1][ptem_64bit] = ptem_32bit; +} + +static uint8_t has_sib_table[4][4][8]; + +static void init_has_sib_table(void) +{ + uint8_t mod; + + memset(has_sib_table, 0, sizeof(has_sib_table)); + + /*for eamode32/64 there is sib byte for mod!=3 and rm==4 */ + for (mod = 0; mod <= 2; mod++) { + has_sib_table[ptem_32bit][mod][4] = 1; + has_sib_table[ptem_64bit][mod][4] = 1; + } +} + +/* SOME ACCESSORS */ + +static inline uint8_t get_byte(const struct pt_ild *ild, uint8_t i) +{ + return ild->itext[i]; +} + +static inline uint8_t const *get_byte_ptr(const struct pt_ild *ild, uint8_t i) +{ + return ild->itext + i; +} + +static inline int mode_64b(const struct pt_ild *ild) +{ + return ild->mode == 
ptem_64bit; +} + +static inline int mode_32b(const struct pt_ild *ild) +{ + return ild->mode == ptem_32bit; +} + +static inline int bits_match(uint8_t x, uint8_t mask, uint8_t target) +{ + return (x & mask) == target; +} + +static inline enum pt_exec_mode +pti_get_nominal_eosz_non64(const struct pt_ild *ild) +{ + if (mode_32b(ild)) { + if (ild->u.s.osz) + return ptem_16bit; + return ptem_32bit; + } + if (ild->u.s.osz) + return ptem_32bit; + return ptem_16bit; +} + +static inline enum pt_exec_mode +pti_get_nominal_eosz(const struct pt_ild *ild) +{ + if (mode_64b(ild)) { + if (ild->u.s.rex_w) + return ptem_64bit; + if (ild->u.s.osz) + return ptem_16bit; + return ptem_32bit; + } + return pti_get_nominal_eosz_non64(ild); +} + +static inline enum pt_exec_mode +pti_get_nominal_eosz_df64(const struct pt_ild *ild) +{ + if (mode_64b(ild)) { + if (ild->u.s.rex_w) + return ptem_64bit; + if (ild->u.s.osz) + return ptem_16bit; + /* only this next line of code is different relative + to pti_get_nominal_eosz(), above */ + return ptem_64bit; + } + return pti_get_nominal_eosz_non64(ild); +} + +static inline enum pt_exec_mode +pti_get_nominal_easz_non64(const struct pt_ild *ild) +{ + if (mode_32b(ild)) { + if (ild->u.s.asz) + return ptem_16bit; + return ptem_32bit; + } + if (ild->u.s.asz) + return ptem_32bit; + return ptem_16bit; +} + +static inline enum pt_exec_mode +pti_get_nominal_easz(const struct pt_ild *ild) +{ + if (mode_64b(ild)) { + if (ild->u.s.asz) + return ptem_32bit; + return ptem_64bit; + } + return pti_get_nominal_easz_non64(ild); +} + +static inline int resolve_z(uint8_t *pbytes, enum pt_exec_mode eosz) +{ + static const uint8_t bytes[] = { 2, 4, 4 }; + unsigned int idx; + + if (!pbytes) + return -pte_internal; + + idx = (unsigned int) eosz - 1; + if (sizeof(bytes) <= idx) + return -pte_bad_insn; + + *pbytes = bytes[idx]; + return 0; +} + +static inline int resolve_v(uint8_t *pbytes, enum pt_exec_mode eosz) +{ + static const uint8_t bytes[] = { 2, 4, 8 }; + unsigned int idx; + + if (!pbytes) + return -pte_internal; + + idx = (unsigned int) eosz - 1; + if (sizeof(bytes) <= idx) + return -pte_bad_insn; + + *pbytes = bytes[idx]; + return 0; +} + +/* DECODERS */ + +static int set_imm_bytes(struct pt_ild *ild) +{ + /*: set ild->imm1_bytes and ild->imm2_bytes for maps 0/1 */ + static uint8_t const *const map_map[] = { + /* map 0 */ imm_bytes_map_0x0, + /* map 1 */ imm_bytes_map_0x0F + }; + uint8_t map, imm_code; + + if (!ild) + return -pte_internal; + + map = ild->map; + + if ((sizeof(map_map) / sizeof(*map_map)) <= map) + return 0; + + imm_code = map_map[map][ild->nominal_opcode]; + switch (imm_code) { + case PTI_IMM_NONE: + case PTI_0_IMM_WIDTH_CONST_l2: + default: + return 0; + + case PTI_UIMM8_IMM_WIDTH_CONST_l2: + ild->imm1_bytes = 1; + return 0; + + case PTI_SIMM8_IMM_WIDTH_CONST_l2: + ild->imm1_bytes = 1; + return 0; + + case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2: + /* SIMMz(eosz) */ + return resolve_z(&ild->imm1_bytes, pti_get_nominal_eosz(ild)); + + case PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2: + /* UIMMv(eosz) */ + return resolve_v(&ild->imm1_bytes, pti_get_nominal_eosz(ild)); + + case PTI_UIMM16_IMM_WIDTH_CONST_l2: + ild->imm1_bytes = 2; + return 0; + + case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2: + /* push defaults to eosz64 in 64b mode, then uses SIMMz */ + return resolve_z(&ild->imm1_bytes, + pti_get_nominal_eosz_df64(ild)); + + case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1: + if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2) { + return resolve_z(&ild->imm1_bytes, 
+ pti_get_nominal_eosz(ild)); + } + return 0; + + case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1: + if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 0) { + return resolve_z(&ild->imm1_bytes, + pti_get_nominal_eosz(ild)); + } + return 0; + + case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1: + if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2) + ild->imm1_bytes = 1; + + return 0; + + case PTI_IMM_hasimm_map0x0_op0xc8_l1: + if (ild->map == PTI_MAP_0) { + /*enter -> imm1=2, imm2=1 */ + ild->imm1_bytes = 2; + ild->imm2_bytes = 1; + } + return 0; + + case PTI_IMM_hasimm_map0x0F_op0x78_l1: + /* AMD SSE4a (insertq/extrq use osz/f2) vs vmread + * (no prefixes) + */ + if (ild->map == PTI_MAP_1) { + if (ild->u.s.osz || ild->u.s.last_f2f3 == 2) { + ild->imm1_bytes = 1; + ild->imm2_bytes = 1; + } + } + return 0; + } +} + +static int imm_dec(struct pt_ild *ild, uint8_t length) +{ + int errcode; + + if (!ild) + return -pte_internal; + + if (ild->map == PTI_MAP_AMD3DNOW) { + if (ild->max_bytes <= length) + return -pte_bad_insn; + + ild->nominal_opcode = get_byte(ild, length); + return length + 1; + } + + errcode = set_imm_bytes(ild); + if (errcode < 0) + return errcode; + + length += ild->imm1_bytes; + length += ild->imm2_bytes; + if (ild->max_bytes < length) + return -pte_bad_insn; + + return length; +} + +static int compute_disp_dec(struct pt_ild *ild) +{ + /* set ild->disp_bytes for maps 0 and 1. */ + static uint8_t const *const map_map[] = { + /* map 0 */ disp_bytes_map_0x0, + /* map 1 */ disp_bytes_map_0x0F + }; + uint8_t map, disp_kind; + + if (!ild) + return -pte_internal; + + if (0 < ild->disp_bytes) + return 0; + + map = ild->map; + + if ((sizeof(map_map) / sizeof(*map_map)) <= map) + return 0; + + disp_kind = map_map[map][ild->nominal_opcode]; + switch (disp_kind) { + case PTI_DISP_NONE: + ild->disp_bytes = 0; + return 0; + + case PTI_PRESERVE_DEFAULT: + /* nothing to do */ + return 0; + + case PTI_BRDISP8: + ild->disp_bytes = 1; + return 0; + + case PTI_DISP_BUCKET_0_l1: + /* BRDISPz(eosz) for 16/32 modes, and BRDISP32 for 64b mode */ + if (mode_64b(ild)) { + ild->disp_bytes = 4; + return 0; + } + + return resolve_z(&ild->disp_bytes, + pti_get_nominal_eosz(ild)); + + case PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2: + /* MEMDISPv(easz) */ + return resolve_v(&ild->disp_bytes, pti_get_nominal_easz(ild)); + + case PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2: + /* BRDISPz(eosz) for 16/32/64 modes */ + return resolve_z(&ild->disp_bytes, pti_get_nominal_eosz(ild)); + + case PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1: + /* reg=0 -> preserve, reg=7 -> BRDISPz(eosz) */ + if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 7) { + return resolve_z(&ild->disp_bytes, + pti_get_nominal_eosz(ild)); + } + return 0; + + default: + return -pte_bad_insn; + } +} + +static int disp_dec(struct pt_ild *ild, uint8_t length) +{ + uint8_t disp_bytes; + int errcode; + + if (!ild) + return -pte_internal; + + errcode = compute_disp_dec(ild); + if (errcode < 0) + return errcode; + + disp_bytes = ild->disp_bytes; + if (disp_bytes == 0) + return imm_dec(ild, length); + + if (length + disp_bytes > ild->max_bytes) + return -pte_bad_insn; + + /*Record only position; must be able to re-read itext bytes for actual + value. (SMC/CMC issue). 
*/ + ild->disp_pos = length; + + return imm_dec(ild, length + disp_bytes); +} + +static int sib_dec(struct pt_ild *ild, uint8_t length) +{ + uint8_t sib; + + if (!ild) + return -pte_internal; + + if (ild->max_bytes <= length) + return -pte_bad_insn; + + sib = get_byte(ild, length); + if ((sib & 0x07) == 0x05 && pti_get_modrm_mod(ild) == 0) + ild->disp_bytes = 4; + + return disp_dec(ild, length + 1); +} + +static int modrm_dec(struct pt_ild *ild, uint8_t length) +{ + static uint8_t const *const has_modrm_2d[2] = { + has_modrm_map_0x0, + has_modrm_map_0x0F + }; + int has_modrm = PTI_MODRM_FALSE; + pti_map_enum_t map; + + if (!ild) + return -pte_internal; + + map = pti_get_map(ild); + if (map >= PTI_MAP_2) + has_modrm = PTI_MODRM_TRUE; + else + has_modrm = has_modrm_2d[map][ild->nominal_opcode]; + + if (has_modrm == PTI_MODRM_FALSE || has_modrm == PTI_MODRM_UNDEF) + return disp_dec(ild, length); + + /* really >= here because we have not eaten the byte yet */ + if (length >= ild->max_bytes) + return -pte_bad_insn; + + ild->modrm_byte = get_byte(ild, length); + + if (has_modrm != PTI_MODRM_IGNORE_MOD) { + /* set disp_bytes and sib using simple tables */ + + uint8_t eamode = eamode_table[ild->u.s.asz][ild->mode]; + uint8_t mod = (uint8_t) pti_get_modrm_mod(ild); + uint8_t rm = (uint8_t) pti_get_modrm_rm(ild); + uint8_t has_sib; + + ild->disp_bytes = has_disp_regular[eamode][mod][rm]; + + has_sib = has_sib_table[eamode][mod][rm]; + if (has_sib) + return sib_dec(ild, length + 1); + } + + return disp_dec(ild, length + 1); +} + +static inline int get_next_as_opcode(struct pt_ild *ild, uint8_t length) +{ + if (!ild) + return -pte_internal; + + if (ild->max_bytes <= length) + return -pte_bad_insn; + + ild->nominal_opcode = get_byte(ild, length); + + return modrm_dec(ild, length + 1); +} + +static int opcode_dec(struct pt_ild *ild, uint8_t length) +{ + uint8_t b, m; + + if (!ild) + return -pte_internal; + + /*no need to check max_bytes - it was checked in previous scanners */ + b = get_byte(ild, length); + if (b != 0x0F) { /* 1B opcodes, map 0 */ + ild->map = PTI_MAP_0; + ild->nominal_opcode = b; + + return modrm_dec(ild, length + 1); + } + + length++; /* eat the 0x0F */ + + if (ild->max_bytes <= length) + return -pte_bad_insn; + + /* 0x0F opcodes MAPS 1,2,3 */ + m = get_byte(ild, length); + if (m == 0x38) { + ild->map = PTI_MAP_2; + + return get_next_as_opcode(ild, length + 1); + } else if (m == 0x3A) { + ild->map = PTI_MAP_3; + ild->imm1_bytes = 1; + + return get_next_as_opcode(ild, length + 1); + } else if (bits_match(m, 0xf8, 0x38)) { + ild->map = PTI_MAP_INVALID; + + return get_next_as_opcode(ild, length + 1); + } else if (m == 0x0F) { /* 3dNow */ + ild->map = PTI_MAP_AMD3DNOW; + ild->imm1_bytes = 1; + /* real opcode is in immediate later on, but we need an + * opcode now. 
*/ + ild->nominal_opcode = 0x0F; + + return modrm_dec(ild, length + 1); + } else { /* map 1 (simple two byte opcodes) */ + ild->nominal_opcode = m; + ild->map = PTI_MAP_1; + + return modrm_dec(ild, length + 1); + } +} + +typedef int (*prefix_decoder)(struct pt_ild *ild, uint8_t length, uint8_t rex); +static prefix_decoder prefix_table[256]; + +static inline int prefix_decode(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + uint8_t byte; + + if (!ild) + return -pte_internal; + + if (ild->max_bytes <= length) + return -pte_bad_insn; + + byte = get_byte(ild, length); + + return prefix_table[byte](ild, length, rex); +} + +static inline int prefix_next(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + return prefix_decode(ild, length + 1, rex); +} + +static int prefix_osz(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + ild->u.s.osz = 1; + + return prefix_next(ild, length, 0); +} + +static int prefix_asz(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + ild->u.s.asz = 1; + + return prefix_next(ild, length, 0); +} + +static int prefix_lock(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + ild->u.s.lock = 1; + + return prefix_next(ild, length, 0); +} + +static int prefix_f2(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + ild->u.s.f2 = 1; + ild->u.s.last_f2f3 = 2; + + return prefix_next(ild, length, 0); +} + +static int prefix_f3(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + ild->u.s.f3 = 1; + ild->u.s.last_f2f3 = 3; + + return prefix_next(ild, length, 0); +} + +static int prefix_ignore(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + return prefix_next(ild, length, 0); +} + +static int prefix_done(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + if (!ild) + return -pte_internal; + + if (rex & 0x04) + ild->u.s.rex_r = 1; + if (rex & 0x08) + ild->u.s.rex_w = 1; + + return opcode_dec(ild, length); +} + +static int prefix_rex(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + (void) rex; + + if (!ild) + return -pte_internal; + + if (mode_64b(ild)) + return prefix_next(ild, length, get_byte(ild, length)); + else + return opcode_dec(ild, length); +} + +static inline int prefix_vex_done(struct pt_ild *ild, uint8_t length) +{ + if (!ild) + return -pte_internal; + + ild->nominal_opcode = get_byte(ild, length); + + return modrm_dec(ild, length + 1); +} + +static int prefix_vex_c5(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + uint8_t max_bytes; + uint8_t p1; + + (void) rex; + + if (!ild) + return -pte_internal; + + max_bytes = ild->max_bytes; + + /* Read the next byte to validate that this is indeed VEX. */ + if (max_bytes <= (length + 1)) + return -pte_bad_insn; + + p1 = get_byte(ild, length + 1); + + /* If p1[7:6] is not 11b in non-64-bit mode, this is LDS, not VEX. */ + if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0)) + return opcode_dec(ild, length); + + /* We need at least 3 bytes + * - 2 for the VEX prefix and payload and + * - 1 for the opcode. + */ + if (max_bytes < (length + 3)) + return -pte_bad_insn; + + ild->u.s.vex = 1; + if (p1 & 0x80) + ild->u.s.rex_r = 1; + + ild->map = PTI_MAP_1; + + /* Eat the VEX. 
*/ + length += 2; + return prefix_vex_done(ild, length); +} + +static int prefix_vex_c4(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + uint8_t max_bytes; + uint8_t p1, p2, map; + + (void) rex; + + if (!ild) + return -pte_internal; + + max_bytes = ild->max_bytes; + + /* Read the next byte to validate that this is indeed VEX. */ + if (max_bytes <= (length + 1)) + return -pte_bad_insn; + + p1 = get_byte(ild, length + 1); + + /* If p1[7:6] is not 11b in non-64-bit mode, this is LES, not VEX. */ + if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0)) + return opcode_dec(ild, length); + + /* We need at least 4 bytes + * - 3 for the VEX prefix and payload and + * - 1 for the opcode. + */ + if (max_bytes < (length + 4)) + return -pte_bad_insn; + + p2 = get_byte(ild, length + 2); + + ild->u.s.vex = 1; + if (p1 & 0x80) + ild->u.s.rex_r = 1; + if (p2 & 0x80) + ild->u.s.rex_w = 1; + + map = p1 & 0x1f; + if (PTI_MAP_INVALID <= map) + return -pte_bad_insn; + + ild->map = map; + if (map == PTI_MAP_3) + ild->imm1_bytes = 1; + + /* Eat the VEX. */ + length += 3; + return prefix_vex_done(ild, length); +} + +static int prefix_evex(struct pt_ild *ild, uint8_t length, uint8_t rex) +{ + uint8_t max_bytes; + uint8_t p1, p2, map; + + (void) rex; + + if (!ild) + return -pte_internal; + + max_bytes = ild->max_bytes; + + /* Read the next byte to validate that this is indeed EVEX. */ + if (max_bytes <= (length + 1)) + return -pte_bad_insn; + + p1 = get_byte(ild, length + 1); + + /* If p1[7:6] is not 11b in non-64-bit mode, this is BOUND, not EVEX. */ + if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0)) + return opcode_dec(ild, length); + + /* We need at least 5 bytes + * - 4 for the EVEX prefix and payload and + * - 1 for the opcode. + */ + if (max_bytes < (length + 5)) + return -pte_bad_insn; + + p2 = get_byte(ild, length + 2); + + ild->u.s.vex = 1; + if (p1 & 0x80) + ild->u.s.rex_r = 1; + if (p2 & 0x80) + ild->u.s.rex_w = 1; + + map = p1 & 0x03; + ild->map = map; + + if (map == PTI_MAP_3) + ild->imm1_bytes = 1; + + /* Eat the EVEX. */ + length += 4; + return prefix_vex_done(ild, length); +} + +static void init_prefix_table(void) +{ + unsigned int byte; + + for (byte = 0; byte <= 0xff; ++byte) + prefix_table[byte] = prefix_done; + + prefix_table[0x66] = prefix_osz; + prefix_table[0x67] = prefix_asz; + + /* Segment prefixes. 
*/ + prefix_table[0x2e] = prefix_ignore; + prefix_table[0x3e] = prefix_ignore; + prefix_table[0x26] = prefix_ignore; + prefix_table[0x36] = prefix_ignore; + prefix_table[0x64] = prefix_ignore; + prefix_table[0x65] = prefix_ignore; + + prefix_table[0xf0] = prefix_lock; + prefix_table[0xf2] = prefix_f2; + prefix_table[0xf3] = prefix_f3; + + for (byte = 0x40; byte <= 0x4f; ++byte) + prefix_table[byte] = prefix_rex; + + prefix_table[0xc4] = prefix_vex_c4; + prefix_table[0xc5] = prefix_vex_c5; + prefix_table[0x62] = prefix_evex; +} + +static int decode(struct pt_ild *ild) +{ + return prefix_decode(ild, 0, 0); +} + +static int set_branch_target(struct pt_insn_ext *iext, const struct pt_ild *ild) +{ + if (!iext || !ild) + return -pte_internal; + + iext->variant.branch.is_direct = 1; + + if (ild->disp_bytes == 1) { + int8_t *b = (int8_t *) (get_byte_ptr(ild, ild->disp_pos)); + + iext->variant.branch.displacement = *b; + } else if (ild->disp_bytes == 2) { + int16_t *w = (int16_t *) (get_byte_ptr(ild, ild->disp_pos)); + + iext->variant.branch.displacement = *w; + } else if (ild->disp_bytes == 4) { + int32_t *d = (int32_t *) (get_byte_ptr(ild, ild->disp_pos)); + + iext->variant.branch.displacement = *d; + } else + return -pte_bad_insn; + + return 0; +} + +/* MAIN ENTRY POINTS */ + +void pt_ild_init(void) +{ /* initialization */ + init_has_disp_regular_table(); + init_has_sib_table(); + init_eamode_table(); + init_prefix_table(); +} + +static int pt_instruction_length_decode(struct pt_ild *ild) +{ + if (!ild) + return -pte_internal; + + ild->u.i = 0; + ild->imm1_bytes = 0; + ild->imm2_bytes = 0; + ild->disp_bytes = 0; + ild->modrm_byte = 0; + ild->map = PTI_MAP_INVALID; + + if (!ild->mode) + return -pte_bad_insn; + + return decode(ild); +} + +static int pt_instruction_decode(struct pt_insn *insn, struct pt_insn_ext *iext, + const struct pt_ild *ild) +{ + uint8_t opcode, map; + + if (!iext || !ild) + return -pte_internal; + + iext->iclass = PTI_INST_INVALID; + memset(&iext->variant, 0, sizeof(iext->variant)); + + insn->iclass = ptic_other; + + opcode = ild->nominal_opcode; + map = ild->map; + + if (map > PTI_MAP_1) + return 0; /* uninteresting */ + if (ild->u.s.vex) + return 0; /* uninteresting */ + + /* PTI_INST_JCC, 70...7F, 0F (0x80...0x8F) */ + if (opcode >= 0x70 && opcode <= 0x7F) { + if (map == PTI_MAP_0) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_JCC; + + return set_branch_target(iext, ild); + } + return 0; + } + if (opcode >= 0x80 && opcode <= 0x8F) { + if (map == PTI_MAP_1) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_JCC; + + return set_branch_target(iext, ild); + } + return 0; + } + + switch (ild->nominal_opcode) { + case 0x9A: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_CALL_9A; + } + return 0; + + case 0xFF: + if (map == PTI_MAP_0) { + uint8_t reg = pti_get_modrm_reg(ild); + + if (reg == 2) { + insn->iclass = ptic_call; + iext->iclass = PTI_INST_CALL_FFr2; + } else if (reg == 3) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_CALL_FFr3; + } else if (reg == 4) { + insn->iclass = ptic_jump; + iext->iclass = PTI_INST_JMP_FFr4; + } else if (reg == 5) { + insn->iclass = ptic_far_jump; + iext->iclass = PTI_INST_JMP_FFr5; + } + } + return 0; + + case 0xE8: + if (map == PTI_MAP_0) { + insn->iclass = ptic_call; + iext->iclass = PTI_INST_CALL_E8; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xCD: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_INT; + } + + 
return 0; + + case 0xCC: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_INT3; + } + + return 0; + + case 0xCE: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_INTO; + } + + return 0; + + case 0xF1: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_INT1; + } + + return 0; + + case 0xCF: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_IRET; + } + return 0; + + case 0xE9: + if (map == PTI_MAP_0) { + insn->iclass = ptic_jump; + iext->iclass = PTI_INST_JMP_E9; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xEA: + if (map == PTI_MAP_0) { + /* Far jumps are treated as indirect jumps. */ + insn->iclass = ptic_far_jump; + iext->iclass = PTI_INST_JMP_EA; + } + return 0; + + case 0xEB: + if (map == PTI_MAP_0) { + insn->iclass = ptic_jump; + iext->iclass = PTI_INST_JMP_EB; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xE3: + if (map == PTI_MAP_0) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_JrCXZ; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xE0: + if (map == PTI_MAP_0) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_LOOPNE; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xE1: + if (map == PTI_MAP_0) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_LOOPE; + + return set_branch_target(iext, ild); + } + return 0; + + case 0xE2: + if (map == PTI_MAP_0) { + insn->iclass = ptic_cond_jump; + iext->iclass = PTI_INST_LOOP; + + return set_branch_target(iext, ild); + } + return 0; + + case 0x22: + if (map == PTI_MAP_1) + if (pti_get_modrm_reg(ild) == 3) + if (!ild->u.s.rex_r) + iext->iclass = PTI_INST_MOV_CR3; + + return 0; + + case 0xC3: + if (map == PTI_MAP_0) { + insn->iclass = ptic_return; + iext->iclass = PTI_INST_RET_C3; + } + return 0; + + case 0xC2: + if (map == PTI_MAP_0) { + insn->iclass = ptic_return; + iext->iclass = PTI_INST_RET_C2; + } + return 0; + + case 0xCB: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_RET_CB; + } + return 0; + + case 0xCA: + if (map == PTI_MAP_0) { + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_RET_CA; + } + return 0; + + case 0x05: + if (map == PTI_MAP_1) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_SYSCALL; + } + return 0; + + case 0x34: + if (map == PTI_MAP_1) { + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_SYSENTER; + } + return 0; + + case 0x35: + if (map == PTI_MAP_1) { + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_SYSEXIT; + } + return 0; + + case 0x07: + if (map == PTI_MAP_1) { + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_SYSRET; + } + return 0; + + case 0x01: + if (map == PTI_MAP_1) { + switch (ild->modrm_byte) { + case 0xc1: + insn->iclass = ptic_far_call; + iext->iclass = PTI_INST_VMCALL; + break; + + case 0xc2: + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_VMLAUNCH; + break; + + case 0xc3: + insn->iclass = ptic_far_return; + iext->iclass = PTI_INST_VMRESUME; + break; + + default: + break; + } + } + return 0; + + case 0xc7: + if (map == PTI_MAP_1 && + pti_get_modrm_mod(ild) != 3 && + pti_get_modrm_reg(ild) == 6) + iext->iclass = PTI_INST_VMPTRLD; + + return 0; + + default: + return 0; + } +} + +int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext) +{ + struct pt_ild ild; + int size; + + if (!insn || !iext) + return -pte_internal; + + ild.mode = insn->mode; + ild.itext 
= insn->raw; + ild.max_bytes = insn->size; + + size = pt_instruction_length_decode(&ild); + if (size < 0) + return size; + + insn->size = (uint8_t) size; + + return pt_instruction_decode(insn, iext, &ild); +} diff --git a/libipt/src/pt_image.c b/libipt/src/pt_image.c new file mode 100644 index 0000000..5c355bc --- /dev/null +++ b/libipt/src/pt_image.c @@ -0,0 +1,974 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_image.h" +#include "pt_section.h" +#include "pt_asid.h" +#include "pt_image_section_cache.h" + +#include <stdlib.h> +#include <string.h> + + +static char *dupstr(const char *str) +{ + char *dup; + size_t len; + + if (!str) + return NULL; + + len = strlen(str); + dup = malloc(len + 1); + if (!dup) + return NULL; + + return strcpy(dup, str); +} + +static struct pt_section_list *pt_mk_section_list(struct pt_section *section, + const struct pt_asid *asid, + uint64_t vaddr, int isid) +{ + struct pt_section_list *list; + int errcode; + + list = malloc(sizeof(*list)); + if (!list) + return NULL; + + memset(list, 0, sizeof(*list)); + + errcode = pt_section_get(section); + if (errcode < 0) + goto out_mem; + + pt_msec_init(&list->section, section, asid, vaddr); + list->isid = isid; + + return list; + +out_mem: + free(list); + return NULL; +} + +static void pt_section_list_free(struct pt_section_list *list) +{ + if (!list) + return; + + if (list->mapped) + pt_section_unmap(list->section.section); + pt_section_put(list->section.section); + pt_msec_fini(&list->section); + free(list); +} + +static void pt_section_list_free_tail(struct pt_section_list *list) +{ + while (list) { + struct pt_section_list *trash; + + trash = list; + list = list->next; + + pt_section_list_free(trash); + } +} + +void pt_image_init(struct pt_image *image, const char *name) +{ + if (!image) + return; + + memset(image, 0, sizeof(*image)); + + image->name = dupstr(name); + image->cache = 10; +} + +void pt_image_fini(struct pt_image *image) +{ + if (!image) + return; + + pt_section_list_free_tail(image->sections); + free(image->name); + + memset(image, 0, sizeof(*image)); +} + +struct pt_image *pt_image_alloc(const char *name) +{ + struct pt_image *image; + + image = malloc(sizeof(*image)); + if (image) + pt_image_init(image, name); + + return image; +} + +void pt_image_free(struct pt_image *image) +{ + pt_image_fini(image); + free(image); +} + +const char *pt_image_name(const struct pt_image *image) +{ + if (!image) + return NULL; + + return image->name; +} + +static int pt_image_clone(struct pt_section_list **list, + const struct pt_mapped_section *msec, + uint64_t begin, uint64_t end, int isid) +{ + const struct pt_asid *masid; + struct pt_section_list *next; + struct pt_section *section, *sec; + uint64_t mbegin, sbegin, offset, size; + int errcode; + + if (!list || !msec) + return -pte_internal; + + sec = pt_msec_section(msec); + masid = pt_msec_asid(msec); + mbegin = pt_msec_begin(msec); + sbegin = pt_section_offset(sec); + + if (end <= begin) + return -pte_internal; + + if (begin < mbegin) + return -pte_internal; + + offset = begin - mbegin; + size = end - begin; + + errcode = pt_section_clone(&section, sec, sbegin + offset, size); + if (errcode < 0) + return errcode; + + next = pt_mk_section_list(section, masid, begin, isid); + if (!next) { + (void) pt_section_put(section); + + return -pte_nomem; + } + + /* The image list got its own reference; let's drop ours. */ + errcode = pt_section_put(section); + if (errcode < 0) { + pt_section_list_free(next); + + return errcode; + } + + /* Add the new section. 
*/ + next->next = *list; + *list = next; + + return 0; +} + +int pt_image_add(struct pt_image *image, struct pt_section *section, + const struct pt_asid *asid, uint64_t vaddr, int isid) +{ + struct pt_section_list **list, *next, *removed; + uint64_t begin, end; + int errcode; + + if (!image || !section) + return -pte_internal; + + next = pt_mk_section_list(section, asid, vaddr, isid); + if (!next) + return -pte_nomem; + + removed = NULL; + errcode = 0; + + begin = vaddr; + end = begin + pt_section_size(section); + + /* Check for overlaps while we move to the end of the list. */ + list = &(image->sections); + while (*list) { + const struct pt_mapped_section *msec; + const struct pt_asid *masid; + struct pt_section_list *current; + struct pt_section *lsec; + uint64_t lbegin, lend; + + current = *list; + msec = &current->section; + masid = pt_msec_asid(msec); + + errcode = pt_asid_match(masid, asid); + if (errcode < 0) + break; + + if (!errcode) { + list = &((*list)->next); + continue; + } + + lbegin = pt_msec_begin(msec); + lend = pt_msec_end(msec); + + if ((end <= lbegin) || (lend <= begin)) { + list = &((*list)->next); + continue; + } + + /* The new section overlaps with @msec's section. */ + lsec = pt_msec_section(msec); + + /* Let's check for an identical overlap that may be the result + * of repeatedly copying images or repeatedly adding the same + * file. + */ + if ((begin == lbegin) && (end == lend) && + (isid == current->isid)) { + const char *fname, *lfname; + + fname = pt_section_filename(section); + lfname = pt_section_filename(lsec); + + if (!fname || !lfname) { + errcode = -pte_internal; + break; + } + + if (strcmp(fname, lfname) == 0) { + /* There should not have been any removals or + * additions. + */ + if (removed || next->next) { + errcode = -pte_internal; + break; + } + + pt_section_list_free(next); + return 0; + } + } + + /* We remove @msec and insert new sections for the remaining + * parts, if any. Those new sections are not mapped initially + * and need to be added to the end of the section list. + */ + *list = current->next; + + /* Keep a list of removed sections so we can re-add them in case + * of errors. + */ + current->next = removed; + removed = current; + + /* Unmap the removed section. If we need to re-add it, it will + * be moved to the end of the section list where the unmapped + * sections are. + */ + if (current->mapped) { + pt_section_unmap(lsec); + current->mapped = 0; + } + + /* Add a section covering the remaining bytes at the front. + * + * We preserve the section identifier to indicate that the new + * section originated from the original section. + */ + if (lbegin < begin) { + errcode = pt_image_clone(&next, msec, lbegin, begin, + current->isid); + if (errcode < 0) + break; + } + + /* Add a section covering the remaining bytes at the back. + * + * We preserve the section identifier to indicate that the new + * section originated from the original section. + */ + if (end < lend) { + errcode = pt_image_clone(&next, msec, end, lend, + current->isid); + if (errcode < 0) + break; + } + } + + if (errcode < 0) { + pt_section_list_free_tail(next); + + /* Re-add removed sections to the tail of the section list. 
*/ + for (; *list; list = &((*list)->next)) + ; + + *list = removed; + return errcode; + } + + pt_section_list_free_tail(removed); + + *list = next; + return 0; +} + +int pt_image_remove(struct pt_image *image, struct pt_section *section, + const struct pt_asid *asid, uint64_t vaddr) +{ + struct pt_section_list **list; + + if (!image || !section) + return -pte_internal; + + for (list = &image->sections; *list; list = &((*list)->next)) { + struct pt_mapped_section *msec; + const struct pt_section *sec; + const struct pt_asid *masid; + struct pt_section_list *trash; + uint64_t begin; + int errcode; + + trash = *list; + msec = &trash->section; + masid = pt_msec_asid(msec); + + errcode = pt_asid_match(masid, asid); + if (errcode < 0) + return errcode; + + if (!errcode) + continue; + + begin = pt_msec_begin(msec); + sec = pt_msec_section(msec); + if (sec == section && begin == vaddr) { + *list = trash->next; + pt_section_list_free(trash); + + return 0; + } + } + + return -pte_bad_image; +} + +int pt_image_add_file(struct pt_image *image, const char *filename, + uint64_t offset, uint64_t size, + const struct pt_asid *uasid, uint64_t vaddr) +{ + struct pt_section *section; + struct pt_asid asid; + int errcode; + + if (!image || !filename) + return -pte_invalid; + + errcode = pt_asid_from_user(&asid, uasid); + if (errcode < 0) + return errcode; + + section = pt_mk_section(filename, offset, size); + if (!section) + return -pte_invalid; + + errcode = pt_image_add(image, section, &asid, vaddr, 0); + if (errcode < 0) { + (void) pt_section_put(section); + return errcode; + } + + /* The image list got its own reference; let's drop ours. */ + errcode = pt_section_put(section); + if (errcode < 0) + return errcode; + + return 0; +} + +int pt_image_copy(struct pt_image *image, const struct pt_image *src) +{ + struct pt_section_list *list; + int ignored; + + if (!image || !src) + return -pte_invalid; + + ignored = 0; + for (list = src->sections; list; list = list->next) { + int errcode; + + errcode = pt_image_add(image, list->section.section, + &list->section.asid, + list->section.vaddr, + list->isid); + if (errcode < 0) + ignored += 1; + } + + return ignored; +} + +int pt_image_remove_by_filename(struct pt_image *image, const char *filename, + const struct pt_asid *uasid) +{ + struct pt_section_list **list; + struct pt_asid asid; + int errcode, removed; + + if (!image || !filename) + return -pte_invalid; + + errcode = pt_asid_from_user(&asid, uasid); + if (errcode < 0) + return errcode; + + removed = 0; + for (list = &image->sections; *list;) { + struct pt_mapped_section *msec; + const struct pt_section *sec; + const struct pt_asid *masid; + struct pt_section_list *trash; + const char *tname; + + trash = *list; + msec = &trash->section; + masid = pt_msec_asid(msec); + + errcode = pt_asid_match(masid, &asid); + if (errcode < 0) + return errcode; + + if (!errcode) { + list = &trash->next; + continue; + } + + sec = pt_msec_section(msec); + tname = pt_section_filename(sec); + + if (tname && (strcmp(tname, filename) == 0)) { + *list = trash->next; + pt_section_list_free(trash); + + removed += 1; + } else + list = &trash->next; + } + + return removed; +} + +int pt_image_remove_by_asid(struct pt_image *image, + const struct pt_asid *uasid) +{ + struct pt_section_list **list; + struct pt_asid asid; + int errcode, removed; + + if (!image) + return -pte_invalid; + + errcode = pt_asid_from_user(&asid, uasid); + if (errcode < 0) + return errcode; + + removed = 0; + for (list = &image->sections; *list;) { + struct 
pt_mapped_section *msec; + const struct pt_asid *masid; + struct pt_section_list *trash; + + trash = *list; + msec = &trash->section; + masid = pt_msec_asid(msec); + + errcode = pt_asid_match(masid, &asid); + if (errcode < 0) + return errcode; + + if (!errcode) { + list = &trash->next; + continue; + } + + *list = trash->next; + pt_section_list_free(trash); + + removed += 1; + } + + return removed; +} + +int pt_image_set_callback(struct pt_image *image, + read_memory_callback_t *callback, void *context) +{ + if (!image) + return -pte_invalid; + + image->readmem.callback = callback; + image->readmem.context = context; + + return 0; +} + +static int pt_image_prune_cache(struct pt_image *image) +{ + struct pt_section_list *list; + uint16_t cache, mapped; + int status; + + if (!image) + return -pte_internal; + + cache = image->cache; + status = 0; + mapped = 0; + for (list = image->sections; list; list = list->next) { + int errcode; + + /* Let's traverse the entire list. It isn't very long and + * this allows us to fix up any previous unmap errors. + */ + if (!list->mapped) + continue; + + mapped += 1; + if (mapped <= cache) + continue; + + errcode = pt_section_unmap(list->section.section); + if (errcode < 0) { + status = errcode; + continue; + } + + list->mapped = 0; + mapped -= 1; + } + + image->mapped = mapped; + return status; +} + +static int pt_image_read_callback(struct pt_image *image, int *isid, + uint8_t *buffer, uint16_t size, + const struct pt_asid *asid, uint64_t addr) +{ + read_memory_callback_t *callback; + + if (!image || !isid) + return -pte_internal; + + callback = image->readmem.callback; + if (!callback) + return -pte_nomap; + + *isid = 0; + + return callback(buffer, size, asid, addr, image->readmem.context); +} + +/* Check whether a mapped section contains an address. + * + * Returns zero if @msec contains @vaddr. + * Returns a negative error code otherwise. + * Returns -pte_nomap if @msec does not contain @vaddr. + */ +static inline int pt_image_check_msec(const struct pt_mapped_section *msec, + const struct pt_asid *asid, + uint64_t vaddr) +{ + const struct pt_asid *masid; + uint64_t begin, end; + int errcode; + + if (!msec) + return -pte_internal; + + begin = pt_msec_begin(msec); + end = pt_msec_end(msec); + if (vaddr < begin || end <= vaddr) + return -pte_nomap; + + masid = pt_msec_asid(msec); + errcode = pt_asid_match(masid, asid); + if (errcode <= 0) { + if (!errcode) + errcode = -pte_nomap; + + return errcode; + } + + return 0; +} + +/* Read memory from a mapped section. + * + * @msec's section must be mapped. + * + * Returns the number of bytes read on success. + * Returns a negative error code otherwise. + */ +static int pt_image_read_msec(uint8_t *buffer, uint16_t size, + const struct pt_mapped_section *msec, + uint64_t addr) +{ + struct pt_section *section; + uint64_t offset; + + if (!msec) + return -pte_internal; + + section = pt_msec_section(msec); + offset = pt_msec_unmap(msec, addr); + + return pt_section_read(section, buffer, size, offset); +} + +/* Find the section containing a given address in a given address space. + * + * On success, the found section is moved to the front of the section list. + * If caching is enabled, maps the section. + * + * Returns zero on success, a negative error code otherwise. 
+ */ +static int pt_image_fetch_section(struct pt_image *image, + const struct pt_asid *asid, uint64_t vaddr) +{ + struct pt_section_list **start, **list; + + if (!image) + return -pte_internal; + + start = &image->sections; + for (list = start; *list;) { + struct pt_mapped_section *msec; + struct pt_section_list *elem; + int errcode; + + elem = *list; + msec = &elem->section; + + errcode = pt_image_check_msec(msec, asid, vaddr); + if (errcode < 0) { + if (errcode != -pte_nomap) + return errcode; + + list = &elem->next; + continue; + } + + /* Move the section to the front if it isn't already. */ + if (list != start) { + *list = elem->next; + elem->next = *start; + *start = elem; + } + + /* Map the section if it isn't already - provided we do cache + * recently used sections. + */ + if (!elem->mapped) { + uint16_t cache, already; + + already = image->mapped; + cache = image->cache; + if (cache) { + struct pt_section *section; + + section = pt_msec_section(msec); + + errcode = pt_section_map(section); + if (errcode < 0) + return errcode; + + elem->mapped = 1; + + already += 1; + image->mapped = already; + + if (cache < already) + return pt_image_prune_cache(image); + } + } + + return 0; + } + + return -pte_nomap; +} + +static int pt_image_read_cold(struct pt_image *image, int *isid, + uint8_t *buffer, uint16_t size, + const struct pt_asid *asid, uint64_t addr) +{ + struct pt_mapped_section *msec; + struct pt_section_list *section; + int errcode; + + if (!image || !isid) + return -pte_internal; + + errcode = pt_image_fetch_section(image, asid, addr); + if (errcode < 0) { + if (errcode != -pte_nomap) + return errcode; + } + + section = image->sections; + if (!section) + return pt_image_read_callback(image, isid, buffer, size, asid, + addr); + + msec = &section->section; + + errcode = pt_image_check_msec(msec, asid, addr); + if (errcode < 0) { + if (errcode != -pte_nomap) + return errcode; + + return pt_image_read_callback(image, isid, buffer, size, asid, + addr); + } + + *isid = section->isid; + + if (section->mapped) + return pt_image_read_msec(buffer, size, msec, addr); + else { + struct pt_section *sec; + int status; + + sec = pt_msec_section(msec); + + errcode = pt_section_map(sec); + if (errcode < 0) + return errcode; + + status = pt_image_read_msec(buffer, size, msec, addr); + + errcode = pt_section_unmap(sec); + if (errcode < 0) + return errcode; + + return status; + } +} + + +int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer, + uint16_t size, const struct pt_asid *asid, uint64_t addr) +{ + struct pt_mapped_section *msec; + struct pt_section_list *section; + int errcode; + + if (!image || !isid) + return -pte_internal; + + section = image->sections; + if (!section) + return pt_image_read_callback(image, isid, buffer, size, asid, + addr); + + if (!section->mapped) + return pt_image_read_cold(image, isid, buffer, size, asid, + addr); + + msec = &section->section; + + errcode = pt_image_check_msec(msec, asid, addr); + if (errcode < 0) { + if (errcode != -pte_nomap) + return errcode; + + return pt_image_read_cold(image, isid, buffer, size, asid, + addr); + } + + *isid = section->isid; + + return pt_image_read_msec(buffer, size, msec, addr); +} + +int pt_image_add_cached(struct pt_image *image, + struct pt_image_section_cache *iscache, int isid, + const struct pt_asid *uasid) +{ + struct pt_section *section; + struct pt_asid asid; + uint64_t vaddr; + int errcode, status; + + if (!image || !iscache) + return -pte_invalid; + + errcode = pt_iscache_lookup(iscache, &section, &vaddr, 
isid); + if (errcode < 0) + return errcode; + + errcode = pt_asid_from_user(&asid, uasid); + if (errcode < 0) + return errcode; + + status = pt_image_add(image, section, &asid, vaddr, isid); + + /* We grab a reference when we add the section. Drop the one we + * obtained from cache lookup. + */ + errcode = pt_section_put(section); + if (errcode < 0) + return errcode; + + return status; +} + +static int pt_image_find_cold(struct pt_image *image, + struct pt_section **psection, uint64_t *laddr, + const struct pt_asid *asid, uint64_t vaddr) +{ + struct pt_mapped_section *msec; + struct pt_section_list *slist; + struct pt_section *section; + int errcode; + + if (!image || !psection || !laddr) + return -pte_internal; + + errcode = pt_image_fetch_section(image, asid, vaddr); + if (errcode < 0) + return errcode; + + slist = image->sections; + if (!slist) + return -pte_nomap; + + msec = &slist->section; + + errcode = pt_image_check_msec(msec, asid, vaddr); + if (errcode < 0) + return errcode; + + section = pt_msec_section(msec); + + errcode = pt_section_get(section); + if (errcode < 0) + return errcode; + + *psection = section; + *laddr = pt_msec_begin(msec); + + return slist->isid; +} + +int pt_image_find(struct pt_image *image, struct pt_section **psection, + uint64_t *laddr, const struct pt_asid *asid, uint64_t vaddr) +{ + struct pt_mapped_section *msec; + struct pt_section_list *slist; + struct pt_section *section; + int errcode; + + if (!image || !psection || !laddr) + return -pte_internal; + + slist = image->sections; + if (!slist) + return -pte_nomap; + + if (!slist->mapped) + return pt_image_find_cold(image, psection, laddr, asid, vaddr); + + msec = &slist->section; + + errcode = pt_image_check_msec(msec, asid, vaddr); + if (errcode < 0) { + if (errcode != -pte_nomap) + return errcode; + + return pt_image_find_cold(image, psection, laddr, asid, vaddr); + } + + section = pt_msec_section(msec); + + errcode = pt_section_get(section); + if (errcode < 0) + return errcode; + + *psection = section; + *laddr = pt_msec_begin(msec); + + return slist->isid; +} + +int pt_image_validate(const struct pt_image *image, const struct pt_asid *asid, + uint64_t vaddr, const struct pt_section *section, + uint64_t laddr, int isid) +{ + struct pt_mapped_section *msec; + struct pt_section_list *slist; + + if (!image) + return -pte_internal; + + /* We only look at the top of our LRU stack and accept sporadic + * validation fails if @section moved down in the LRU stack or has been + * evicted. + * + * A failed validation requires decoders to re-fetch the section so it + * only results in a (relatively small) performance loss. + */ + slist = image->sections; + if (!slist) + return -pte_nomap; + + if (slist->isid != isid) + return -pte_nomap; + + msec = &slist->section; + + if (pt_msec_section(msec) != section) + return -pte_nomap; + + if (pt_msec_begin(msec) != laddr) + return -pte_nomap; + + return pt_image_check_msec(msec, asid, vaddr); +} diff --git a/libipt/src/pt_image_section_cache.c b/libipt/src/pt_image_section_cache.c new file mode 100644 index 0000000..a646dfa --- /dev/null +++ b/libipt/src/pt_image_section_cache.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image_section_cache.h"
+#include "pt_section.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+
+
+static char *dupstr(const char *str)
+{
+	char *dup;
+	size_t len;
+
+	if (!str)
+		return NULL;
+
+	len = strlen(str);
+	dup = malloc(len + 1);
+	if (!dup)
+		return NULL;
+
+	return strcpy(dup, str);
+}
+
+int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
+{
+	if (!iscache)
+		return -pte_internal;
+
+	memset(iscache, 0, sizeof(*iscache));
+	if (name) {
+		iscache->name = dupstr(name);
+		if (!iscache->name)
+			return -pte_nomem;
+	}
+
+#if defined(FEATURE_THREADS)
+	{
+		int errcode;
+
+		errcode = mtx_init(&iscache->lock, mtx_plain);
+		if (errcode != thrd_success)
+			return -pte_bad_lock;
+	}
+#endif /* defined(FEATURE_THREADS) */
+
+	return 0;
+}
+
+void pt_iscache_fini(struct pt_image_section_cache *iscache)
+{
+	if (!iscache)
+		return;
+
+	(void) pt_iscache_clear(iscache);
+	free(iscache->name);
+
+#if defined(FEATURE_THREADS)
+
+	mtx_destroy(&iscache->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+}
+
+static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
+{
+	if (!iscache)
+		return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+	{
+		int errcode;
+
+		errcode = mtx_lock(&iscache->lock);
+		if (errcode != thrd_success)
+			return -pte_bad_lock;
+	}
+#endif /* defined(FEATURE_THREADS) */
+
+	return 0;
+}
+
+static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
+{
+	if (!iscache)
+		return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+	{
+		int errcode;
+
+		errcode = mtx_unlock(&iscache->lock);
+		if (errcode != thrd_success)
+			return -pte_bad_lock;
+	}
+#endif /* defined(FEATURE_THREADS) */
+
+	return 0;
+}
+
+static inline int isid_from_index(uint16_t index)
+{
+	return index + 1;
+}
+
+static int pt_iscache_expand(struct pt_image_section_cache *iscache)
+{
+	struct pt_iscache_entry *entries;
+	uint16_t capacity, target;
+
+	if (!iscache)
+		return -pte_internal;
+
+	capacity = iscache->capacity;
+	target = capacity + 8;
+
+	/* Check for overflows.
*/ + if (target < capacity) + return -pte_nomem; + + entries = realloc(iscache->entries, target * sizeof(*entries)); + if (!entries) + return -pte_nomem; + + iscache->capacity = target; + iscache->entries = entries; + return 0; +} + +static int pt_iscache_find_locked(struct pt_image_section_cache *iscache, + const char *filename, uint64_t offset, + uint64_t size, uint64_t laddr) +{ + uint16_t idx, end; + + if (!iscache || !filename) + return -pte_internal; + + end = iscache->size; + for (idx = 0; idx < end; ++idx) { + const struct pt_iscache_entry *entry; + const struct pt_section *section; + const char *sec_filename; + uint64_t sec_offset, sec_size; + + entry = &iscache->entries[idx]; + + /* We do not zero-initialize the array - a NULL check is + * pointless. + */ + section = entry->section; + sec_filename = pt_section_filename(section); + sec_offset = pt_section_offset(section); + sec_size = pt_section_size(section); + + if (entry->laddr != laddr) + continue; + + if (sec_offset != offset) + continue; + + if (sec_size != size) + continue; + + /* We should not have a section without a filename. */ + if (!sec_filename) + return -pte_internal; + + if (strcmp(sec_filename, filename) != 0) + continue; + + return isid_from_index(idx); + } + + return 0; +} + +static int section_match(const struct pt_section *lhs, + const struct pt_section *rhs) +{ + const char *lfilename, *rfilename; + + if (!lhs || !rhs) + return -pte_internal; + + if (pt_section_offset(lhs) != pt_section_offset(rhs)) + return 0; + + if (pt_section_size(lhs) != pt_section_size(rhs)) + return 0; + + lfilename = pt_section_filename(lhs); + rfilename = pt_section_filename(rhs); + + if (!lfilename || !rfilename) + return -pte_internal; + + if (strcmp(lfilename, rfilename) != 0) + return 0; + + return 1; +} + +int pt_iscache_add(struct pt_image_section_cache *iscache, + struct pt_section *section, uint64_t laddr) +{ + uint16_t idx, end; + int errcode; + + if (!iscache || !section) + return -pte_internal; + + /* We must have a filename for @section. */ + if (!pt_section_filename(section)) + return -pte_internal; + + errcode = pt_iscache_lock(iscache); + if (errcode < 0) + return errcode; + + end = iscache->size; + for (idx = 0; idx < end; ++idx) { + const struct pt_iscache_entry *entry; + struct pt_section *sec; + + entry = &iscache->entries[idx]; + + /* We do not zero-initialize the array - a NULL check is + * pointless. + */ + sec = entry->section; + + errcode = section_match(section, sec); + if (errcode <= 0) { + if (errcode < 0) + goto out_unlock; + + continue; + } + + /* Use the cached section instead of the argument section. + * + * We'll be able to drop the argument section in this case and + * only keep one copy around and, more importantly, mapped. + */ + section = sec; + + /* If we also find a matching load address, we're done. */ + if (laddr == entry->laddr) + break; + } + + /* If we have not found a matching entry, add one. */ + if (idx == end) { + struct pt_iscache_entry *entry; + + /* Expand the cache, if necessary. */ + if (iscache->capacity <= iscache->size) { + /* We must never exceed the capacity. */ + if (iscache->capacity < iscache->size) { + errcode = -pte_internal; + goto out_unlock; + } + + errcode = pt_iscache_expand(iscache); + if (errcode < 0) + goto out_unlock; + + /* Make sure it is big enough, now. 
*/ + if (iscache->capacity <= iscache->size) { + errcode = -pte_internal; + goto out_unlock; + } + } + + errcode = pt_section_get(section); + if (errcode < 0) + goto out_unlock; + + idx = iscache->size++; + + entry = &iscache->entries[idx]; + entry->section = section; + entry->laddr = laddr; + } + + errcode = pt_iscache_unlock(iscache); + if (errcode < 0) + return errcode; + + return isid_from_index(idx); + + out_unlock: + (void) pt_iscache_unlock(iscache); + return errcode; +} + +int pt_iscache_find(struct pt_image_section_cache *iscache, + const char *filename, uint64_t offset, uint64_t size, + uint64_t laddr) +{ + int errcode, isid; + + errcode = pt_iscache_lock(iscache); + if (errcode < 0) + return errcode; + + isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr); + + errcode = pt_iscache_unlock(iscache); + if (errcode < 0) + return errcode; + + return isid; +} + +int pt_iscache_lookup(struct pt_image_section_cache *iscache, + struct pt_section **section, uint64_t *laddr, int isid) +{ + uint16_t index; + int errcode, status; + + if (!iscache || !section || !laddr) + return -pte_internal; + + if (isid <= 0) + return -pte_bad_image; + + isid -= 1; + if (isid > UINT16_MAX) + return -pte_internal; + + index = (uint16_t) isid; + + errcode = pt_iscache_lock(iscache); + if (errcode < 0) + return errcode; + + if (iscache->size <= index) + status = -pte_bad_image; + else { + const struct pt_iscache_entry *entry; + + entry = &iscache->entries[index]; + *section = entry->section; + *laddr = entry->laddr; + + status = pt_section_get(*section); + } + + errcode = pt_iscache_unlock(iscache); + if (errcode < 0) + return errcode; + + return status; +} + +int pt_iscache_clear(struct pt_image_section_cache *iscache) +{ + struct pt_iscache_entry *entries; + uint16_t idx, end; + int errcode; + + if (!iscache) + return -pte_internal; + + errcode = pt_iscache_lock(iscache); + if (errcode < 0) + return errcode; + + entries = iscache->entries; + end = iscache->size; + + iscache->entries = NULL; + iscache->capacity = 0; + iscache->size = 0; + + errcode = pt_iscache_unlock(iscache); + if (errcode < 0) + return errcode; + + for (idx = 0; idx < end; ++idx) { + const struct pt_iscache_entry *entry; + + entry = &entries[idx]; + + /* We do not zero-initialize the array - a NULL check is + * pointless. + */ + errcode = pt_section_put(entry->section); + if (errcode < 0) + return errcode; + } + + free(entries); + return 0; +} + +struct pt_image_section_cache *pt_iscache_alloc(const char *name) +{ + struct pt_image_section_cache *iscache; + + iscache = malloc(sizeof(*iscache)); + if (iscache) + pt_iscache_init(iscache, name); + + return iscache; +} + +void pt_iscache_free(struct pt_image_section_cache *iscache) +{ + if (!iscache) + return; + + pt_iscache_fini(iscache); + free(iscache); +} + +const char *pt_iscache_name(const struct pt_image_section_cache *iscache) +{ + if (!iscache) + return NULL; + + return iscache->name; +} + +int pt_iscache_add_file(struct pt_image_section_cache *iscache, + const char *filename, uint64_t offset, uint64_t size, + uint64_t vaddr) +{ + struct pt_section *section; + int isid, errcode; + + if (!iscache || !filename) + return -pte_invalid; + + isid = pt_iscache_find(iscache, filename, offset, size, vaddr); + if (isid != 0) + return isid; + + section = pt_mk_section(filename, offset, size); + if (!section) + return -pte_invalid; + + isid = pt_iscache_add(iscache, section, vaddr); + + /* We grab a reference when we add the section. 
Drop the one we
+	 * obtained when creating the section.
+	 */
+	errcode = pt_section_put(section);
+	if (errcode < 0)
+		return errcode;
+
+	return isid;
+}
+
+
+int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
+		    uint64_t size, int isid, uint64_t vaddr)
+{
+	struct pt_section *section;
+	uint64_t laddr;
+	int errcode, status;
+
+	if (!iscache || !buffer || !size)
+		return -pte_invalid;
+
+	errcode = pt_iscache_lookup(iscache, &section, &laddr, isid);
+	if (errcode < 0)
+		return errcode;
+
+	if (vaddr < laddr) {
+		(void) pt_section_put(section);
+		return -pte_nomap;
+	}
+
+	vaddr -= laddr;
+
+	errcode = pt_section_map(section);
+	if (errcode < 0) {
+		(void) pt_section_put(section);
+		return errcode;
+	}
+
+	/* We truncate the read if it gets too big. The user is expected to
+	 * issue further reads for the remaining part.
+	 */
+	if (UINT16_MAX < size)
+		size = UINT16_MAX;
+
+	status = pt_section_read(section, buffer, (uint16_t) size, vaddr);
+
+	errcode = pt_section_unmap(section);
+	if (errcode < 0) {
+		(void) pt_section_put(section);
+		return errcode;
+	}
+
+	errcode = pt_section_put(section);
+	if (errcode < 0)
+		return errcode;
+
+	return status;
+}
diff --git a/libipt/src/pt_insn.c b/libipt/src/pt_insn.c
new file mode 100644
index 0000000..104f287
--- /dev/null
+++ b/libipt/src/pt_insn.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "pt_insn.h" +#include "pt_ild.h" +#include "pt_image.h" + +#include "intel-pt.h" + + +int pt_insn_changes_cpl(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + (void) insn; + + if (!iext) + return 0; + + switch (iext->iclass) { + default: + return 0; + + case PTI_INST_INT: + case PTI_INST_INT3: + case PTI_INST_INT1: + case PTI_INST_INTO: + case PTI_INST_IRET: + case PTI_INST_SYSCALL: + case PTI_INST_SYSENTER: + case PTI_INST_SYSEXIT: + case PTI_INST_SYSRET: + return 1; + } +} + +int pt_insn_changes_cr3(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + (void) insn; + + if (!iext) + return 0; + + switch (iext->iclass) { + default: + return 0; + + case PTI_INST_MOV_CR3: + return 1; + } +} + +int pt_insn_is_branch(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + (void) iext; + + if (!insn) + return 0; + + switch (insn->iclass) { + default: + return 0; + + case ptic_call: + case ptic_return: + case ptic_jump: + case ptic_cond_jump: + case ptic_far_call: + case ptic_far_return: + case ptic_far_jump: + return 1; + } +} + +int pt_insn_is_far_branch(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + (void) iext; + + if (!insn) + return 0; + + switch (insn->iclass) { + default: + return 0; + + case ptic_far_call: + case ptic_far_return: + case ptic_far_jump: + return 1; + } +} + +int pt_insn_binds_to_pip(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + if (!iext) + return 0; + + switch (iext->iclass) { + default: + return pt_insn_is_far_branch(insn, iext); + + case PTI_INST_MOV_CR3: + case PTI_INST_VMLAUNCH: + case PTI_INST_VMRESUME: + return 1; + } +} + +int pt_insn_binds_to_vmcs(const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + if (!iext) + return 0; + + switch (iext->iclass) { + default: + return pt_insn_is_far_branch(insn, iext); + + case PTI_INST_VMPTRLD: + case PTI_INST_VMLAUNCH: + case PTI_INST_VMRESUME: + return 1; + } +} + +int pt_insn_next_ip(uint64_t *pip, const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + uint64_t ip; + + if (!insn || !iext) + return -pte_internal; + + ip = insn->ip + insn->size; + + switch (insn->iclass) { + case ptic_other: + break; + + case ptic_call: + case ptic_jump: + if (iext->variant.branch.is_direct) { + ip += iext->variant.branch.displacement; + break; + } + + /* Fall through. */ + default: + return -pte_bad_query; + + case ptic_error: + return -pte_bad_insn; + } + + if (pip) + *pip = ip; + + return 0; +} + +/* Retry decoding an instruction after a preceding decode error. + * + * Instruction length decode typically fails due to 'not enough + * bytes'. + * + * This may be caused by partial updates of text sections + * represented via new image sections overlapping the original + * text section's image section. We stop reading memory at the + * end of the section so we do not read the full instruction if + * parts of it have been overwritten by the update. + * + * Try to read the remaining bytes and decode the instruction again. If we + * succeed, set @insn->truncated to indicate that the instruction is truncated + * in @insn->isid. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_bad_insn if the instruction could not be decoded. 
+ */ +static int pt_insn_decode_retry(struct pt_insn *insn, struct pt_insn_ext *iext, + struct pt_image *image, + const struct pt_asid *asid) +{ + int size, errcode, isid; + uint8_t isize, remaining; + + if (!insn) + return -pte_internal; + + isize = insn->size; + remaining = sizeof(insn->raw) - isize; + + /* We failed for real if we already read the maximum number of bytes for + * an instruction. + */ + if (!remaining) + return -pte_bad_insn; + + /* Read the remaining bytes from the image. */ + size = pt_image_read(image, &isid, &insn->raw[isize], remaining, asid, + insn->ip + isize); + if (size <= 0) { + /* We should have gotten an error if we were not able to read at + * least one byte. Check this to guarantee termination. + */ + if (!size) + return -pte_internal; + + return size; + } + + /* Add the newly read bytes to the instruction's size. */ + insn->size += (uint8_t) size; + + /* Store the new size to avoid infinite recursion in case instruction + * decode fails after length decode, which would set @insn->size to the + * actual length. + */ + size = insn->size; + + /* Try to decode the instruction again. + * + * If we fail again, we recursively retry again until we either fail to + * read more bytes or reach the maximum number of bytes for an + * instruction. + */ + errcode = pt_ild_decode(insn, iext); + if (errcode < 0) { + if (errcode != -pte_bad_insn) + return errcode; + + /* If instruction length decode already determined the size, + * there's no point in reading more bytes. + */ + if (insn->size != (uint8_t) size) + return errcode; + + return pt_insn_decode_retry(insn, iext, image, asid); + } + + /* We succeeded this time, so the instruction crosses image section + * boundaries. + * + * This poses the question which isid to use for the instruction. + * + * To reconstruct exactly this instruction at a later time, we'd need to + * store all isids involved together with the number of bytes read for + * each isid. Since @insn already provides the exact bytes for this + * instruction, we assume that the isid will be used solely for source + * correlation. In this case, it should refer to the first byte of the + * instruction - as it already does. + */ + insn->truncated = 1; + + return errcode; +} + +int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext, + struct pt_image *image, const struct pt_asid *asid) +{ + int size, errcode; + + if (!insn) + return -pte_internal; + + /* Read the memory at the current IP in the current address space. */ + size = pt_image_read(image, &insn->isid, insn->raw, sizeof(insn->raw), + asid, insn->ip); + if (size < 0) + return size; + + /* We initialize @insn->size to the maximal possible size. It will be + * set to the actual size during instruction decode. + */ + insn->size = (uint8_t) size; + + errcode = pt_ild_decode(insn, iext); + if (errcode < 0) { + if (errcode != -pte_bad_insn) + return errcode; + + /* If instruction length decode already determined the size, + * there's no point in reading more bytes. 
+		 */
+		if (insn->size != (uint8_t) size)
+			return errcode;
+
+		return pt_insn_decode_retry(insn, iext, image, asid);
+	}
+
+	return errcode;
+}
+
+int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+				enum pt_exec_mode mode, struct pt_image *image,
+				const struct pt_asid *asid, size_t steps)
+{
+	struct pt_insn_ext iext;
+	struct pt_insn insn;
+
+	memset(&insn, 0, sizeof(insn));
+
+	insn.mode = mode;
+	insn.ip = begin;
+
+	while (insn.ip != end) {
+		int errcode;
+
+		if (!steps--)
+			return 0;
+
+		errcode = pt_insn_decode(&insn, &iext, image, asid);
+		if (errcode < 0)
+			return errcode;
+
+		errcode = pt_insn_next_ip(&insn.ip, &insn, &iext);
+		if (errcode < 0)
+			return errcode;
+	}
+
+	return 1;
+}
diff --git a/libipt/src/pt_insn_decoder.c b/libipt/src/pt_insn_decoder.c
new file mode 100644
index 0000000..b4e21bc
--- /dev/null
+++ b/libipt/src/pt_insn_decoder.c
@@ -0,0 +1,1342 @@
+/*
+ * Copyright (c) 2013-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_insn_decoder.h"
+#include "pt_insn.h"
+#include "pt_config.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+static void pt_insn_reset(struct pt_insn_decoder *decoder)
+{
+	if (!decoder)
+		return;
+
+	decoder->mode = ptem_unknown;
+	decoder->ip = 0ull;
+	decoder->last_disable_ip = 0ull;
+	decoder->status = 0;
+	decoder->enabled = 0;
+	decoder->process_event = 0;
+	decoder->speculative = 0;
+	decoder->event_may_change_ip = 1;
+
+	pt_retstack_init(&decoder->retstack);
+	pt_asid_init(&decoder->asid);
+}
+
+/* Initialize the query decoder flags based on our flags.
*/ + +static int pt_insn_init_qry_flags(struct pt_conf_flags *qflags, + const struct pt_conf_flags *flags) +{ + if (!qflags || !flags) + return -pte_internal; + + memset(qflags, 0, sizeof(*qflags)); + + return 0; +} + +int pt_insn_decoder_init(struct pt_insn_decoder *decoder, + const struct pt_config *uconfig) +{ + struct pt_config config; + int errcode; + + if (!decoder) + return -pte_internal; + + errcode = pt_config_from_user(&config, uconfig); + if (errcode < 0) + return errcode; + + /* The user supplied decoder flags. */ + decoder->flags = config.flags; + + /* Set the flags we need for the query decoder we use. */ + errcode = pt_insn_init_qry_flags(&config.flags, &decoder->flags); + if (errcode < 0) + return errcode; + + errcode = pt_qry_decoder_init(&decoder->query, &config); + if (errcode < 0) + return errcode; + + pt_image_init(&decoder->default_image, NULL); + decoder->image = &decoder->default_image; + + pt_insn_reset(decoder); + + return 0; +} + +void pt_insn_decoder_fini(struct pt_insn_decoder *decoder) +{ + if (!decoder) + return; + + pt_image_fini(&decoder->default_image); + pt_qry_decoder_fini(&decoder->query); +} + +struct pt_insn_decoder *pt_insn_alloc_decoder(const struct pt_config *config) +{ + struct pt_insn_decoder *decoder; + int errcode; + + decoder = malloc(sizeof(*decoder)); + if (!decoder) + return NULL; + + errcode = pt_insn_decoder_init(decoder, config); + if (errcode < 0) { + free(decoder); + return NULL; + } + + return decoder; +} + +void pt_insn_free_decoder(struct pt_insn_decoder *decoder) +{ + if (!decoder) + return; + + pt_insn_decoder_fini(decoder); + free(decoder); +} + +static int pt_insn_start(struct pt_insn_decoder *decoder, int status) +{ + if (!decoder) + return -pte_internal; + + if (status < 0) + return status; + + decoder->status = status; + + if (!(status & pts_ip_suppressed)) + decoder->enabled = 1; + + return 0; +} + +int pt_insn_sync_forward(struct pt_insn_decoder *decoder) +{ + int status; + + if (!decoder) + return -pte_invalid; + + pt_insn_reset(decoder); + + status = pt_qry_sync_forward(&decoder->query, &decoder->ip); + + return pt_insn_start(decoder, status); +} + +int pt_insn_sync_backward(struct pt_insn_decoder *decoder) +{ + int status; + + if (!decoder) + return -pte_invalid; + + pt_insn_reset(decoder); + + status = pt_qry_sync_backward(&decoder->query, &decoder->ip); + + return pt_insn_start(decoder, status); +} + +int pt_insn_sync_set(struct pt_insn_decoder *decoder, uint64_t offset) +{ + int status; + + if (!decoder) + return -pte_invalid; + + pt_insn_reset(decoder); + + status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset); + + return pt_insn_start(decoder, status); +} + +int pt_insn_get_offset(struct pt_insn_decoder *decoder, uint64_t *offset) +{ + if (!decoder) + return -pte_invalid; + + return pt_qry_get_offset(&decoder->query, offset); +} + +int pt_insn_get_sync_offset(struct pt_insn_decoder *decoder, uint64_t *offset) +{ + if (!decoder) + return -pte_invalid; + + return pt_qry_get_sync_offset(&decoder->query, offset); +} + +struct pt_image *pt_insn_get_image(struct pt_insn_decoder *decoder) +{ + if (!decoder) + return NULL; + + return decoder->image; +} + +int pt_insn_set_image(struct pt_insn_decoder *decoder, + struct pt_image *image) +{ + if (!decoder) + return -pte_invalid; + + if (!image) + image = &decoder->default_image; + + decoder->image = image; + return 0; +} + +const struct pt_config * +pt_insn_get_config(const struct pt_insn_decoder *decoder) +{ + if (!decoder) + return NULL; + + return 
pt_qry_get_config(&decoder->query); +} + +int pt_insn_time(struct pt_insn_decoder *decoder, uint64_t *time, + uint32_t *lost_mtc, uint32_t *lost_cyc) +{ + if (!decoder || !time) + return -pte_invalid; + + return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc); +} + +int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, uint32_t *cbr) +{ + if (!decoder || !cbr) + return -pte_invalid; + + return pt_qry_core_bus_ratio(&decoder->query, cbr); +} + +static inline int event_pending(struct pt_insn_decoder *decoder) +{ + int status; + + if (!decoder) + return -pte_invalid; + + if (decoder->process_event) + return 1; + + status = decoder->status; + if (!(status & pts_event_pending)) + return 0; + + status = pt_qry_event(&decoder->query, &decoder->event, + sizeof(decoder->event)); + if (status < 0) + return status; + + decoder->process_event = 1; + decoder->status = status; + return 1; +} + +static int process_enabled_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + struct pt_event *ev; + + if (!decoder || !insn) + return -pte_internal; + + ev = &decoder->event; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* We must have an IP in order to start decoding. */ + if (ev->ip_suppressed) + return -pte_noip; + + /* We must currently be disabled. */ + if (decoder->enabled) + return -pte_bad_context; + + /* Delay processing of the event if we can't change the IP. */ + if (!decoder->event_may_change_ip) + return 0; + + decoder->ip = ev->variant.enabled.ip; + decoder->enabled = 1; + + /* Clear an indication of a preceding disable on the same + * instruction. + */ + insn->disabled = 0; + + /* Check if we resumed from a preceding disable or if we enabled at a + * different position. + * Should we ever get more than one enabled event, enabled wins. + */ + if (decoder->last_disable_ip == decoder->ip && !insn->enabled) + insn->resumed = 1; + else { + insn->enabled = 1; + insn->resumed = 0; + } + + return 1; +} + +static int process_disabled_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + struct pt_event *ev; + + if (!decoder || !insn) + return -pte_internal; + + ev = &decoder->event; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* We must currently be enabled. */ + if (!decoder->enabled) + return -pte_bad_context; + + decoder->enabled = 0; + insn->disabled = 1; + + return 1; +} + +static int process_async_disabled_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + int errcode; + + if (!decoder) + return -pte_internal; + + errcode = process_disabled_event(decoder, insn); + if (errcode <= 0) + return errcode; + + decoder->last_disable_ip = decoder->ip; + + return errcode; +} + +static int process_sync_disabled_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + int errcode, iperr; + + if (!decoder || !insn) + return -pte_internal; + + errcode = process_disabled_event(decoder, insn); + if (errcode <= 0) + return errcode; + + iperr = pt_insn_next_ip(&decoder->last_disable_ip, insn, iext); + if (iperr < 0) { + /* We don't know the IP on error. */ + decoder->last_disable_ip = 0ull; + + /* For indirect calls, assume that we return to the next + * instruction. 
+		 */
+		if (iperr == -pte_bad_query) {
+			switch (insn->iclass) {
+			case ptic_call:
+			case ptic_far_call:
+				/* We only check the instruction class, not the
+				 * is_direct property, since direct calls would
+				 * have been handled by pt_insn_next_ip() or
+				 * would have provoked a different error.
+				 */
+				decoder->last_disable_ip =
+					insn->ip + insn->size;
+				break;
+
+			default:
+				break;
+			}
+		}
+	}
+
+	return errcode;
+}
+
+static int process_async_branch_event(struct pt_insn_decoder *decoder)
+{
+	struct pt_event *ev;
+
+	if (!decoder)
+		return -pte_internal;
+
+	ev = &decoder->event;
+
+	/* This event can't be a status update. */
+	if (ev->status_update)
+		return -pte_bad_context;
+
+	/* Tracing must be enabled in order to make sense of the event. */
+	if (!decoder->enabled)
+		return -pte_bad_context;
+
+	decoder->ip = ev->variant.async_branch.to;
+
+	return 1;
+}
+
+static int process_paging_event(struct pt_insn_decoder *decoder)
+{
+	struct pt_event *ev;
+
+	if (!decoder)
+		return -pte_internal;
+
+	ev = &decoder->event;
+
+	decoder->asid.cr3 = ev->variant.paging.cr3;
+
+	return 1;
+}
+
+static int process_overflow_event(struct pt_insn_decoder *decoder,
+				  struct pt_insn *insn)
+{
+	struct pt_event *ev;
+
+	if (!decoder || !insn)
+		return -pte_internal;
+
+	ev = &decoder->event;
+
+	/* This event can't be a status update. */
+	if (ev->status_update)
+		return -pte_bad_context;
+
+	/* Delay processing of the event if we can't change the IP. */
+	if (!decoder->event_may_change_ip)
+		return 0;
+
+	/* We don't know the TSX state. Let's assume we execute normally.
+	 *
+	 * We also don't know the execution mode. Let's keep what we have
+	 * in case we don't get an update before we have to decode the next
+	 * instruction.
+	 */
+	decoder->speculative = 0;
+
+	/* Disable tracing if we don't have an IP. */
+	if (ev->ip_suppressed) {
+		/* Indicate the overflow in case tracing was enabled before.
+		 *
+		 * If tracing was disabled, we're not really resyncing.
+		 */
+		if (decoder->enabled) {
+			decoder->enabled = 0;
+
+			/* We mark the instruction as resynced. It won't be
+			 * returned unless we enable tracing again, in which
+			 * case this is the labeling we want.
+			 */
+			insn->resynced = 1;
+		}
+	} else {
+		/* Jump to the IP at which the overflow was resolved. */
+		decoder->ip = ev->variant.overflow.ip;
+		decoder->enabled = 1;
+
+		insn->resynced = 1;
+	}
+
+	return 1;
+}
+
+static int process_exec_mode_event(struct pt_insn_decoder *decoder)
+{
+	enum pt_exec_mode mode;
+	struct pt_event *ev;
+
+	if (!decoder)
+		return -pte_internal;
+
+	ev = &decoder->event;
+	mode = ev->variant.exec_mode.mode;
+
+	/* Use status update events to diagnose inconsistencies.
*/ + if (ev->status_update && decoder->enabled && + decoder->mode != ptem_unknown && decoder->mode != mode) + return -pte_bad_status_update; + + decoder->mode = mode; + + return 1; +} + +static int process_tsx_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + struct pt_event *ev; + int old_speculative; + + if (!decoder) + return -pte_internal; + + old_speculative = decoder->speculative; + ev = &decoder->event; + + decoder->speculative = ev->variant.tsx.speculative; + + if (insn && decoder->enabled) { + if (ev->variant.tsx.aborted) + insn->aborted = 1; + else if (old_speculative && !ev->variant.tsx.speculative) + insn->committed = 1; + } + + return 1; +} + +static int process_stop_event(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + struct pt_event *ev; + + if (!decoder) + return -pte_internal; + + ev = &decoder->event; + + /* This event can't be a status update. */ + if (ev->status_update) + return -pte_bad_context; + + /* Tracing is always disabled before it is stopped. */ + if (decoder->enabled) + return -pte_bad_context; + + if (insn) + insn->stopped = 1; + + return 1; +} + +static int process_vmcs_event(struct pt_insn_decoder *decoder) +{ + struct pt_event *ev; + + if (!decoder) + return -pte_internal; + + ev = &decoder->event; + + decoder->asid.vmcs = ev->variant.vmcs.base; + + return 1; +} + +static int check_erratum_skd022(struct pt_insn_decoder *decoder) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + if (!decoder) + return -pte_internal; + + insn.mode = decoder->mode; + insn.ip = decoder->ip; + + errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid); + if (errcode < 0) + return 0; + + switch (iext.iclass) { + default: + return 0; + + case PTI_INST_VMLAUNCH: + case PTI_INST_VMRESUME: + return 1; + } +} + +static inline int handle_erratum_skd022(struct pt_insn_decoder *decoder) +{ + struct pt_event *ev; + uint64_t ip; + int errcode; + + if (!decoder) + return -pte_internal; + + errcode = check_erratum_skd022(decoder); + if (errcode <= 0) + return errcode; + + /* We turn the async disable into a sync disable. It will be processed + * after decoding the instruction. + */ + ev = &decoder->event; + + ip = ev->variant.async_disabled.ip; + + ev->type = ptev_disabled; + ev->variant.disabled.ip = ip; + + return 1; +} + +static int process_one_event_before(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + struct pt_event *ev; + + if (!decoder || !insn) + return -pte_internal; + + ev = &decoder->event; + switch (ev->type) { + case ptev_enabled: + return process_enabled_event(decoder, insn); + + case ptev_async_branch: + if (ev->variant.async_branch.from == decoder->ip) + return process_async_branch_event(decoder); + + return 0; + + case ptev_async_disabled: + /* We would normally process the disabled event when peeking + * at the next instruction in order to indicate the disabling + * properly. + * This is to catch the case where we disable tracing before + * we actually started. 
+ */ + if (ev->variant.async_disabled.at == decoder->ip) { + if (decoder->query.config.errata.skd022) { + int errcode; + + errcode = handle_erratum_skd022(decoder); + if (errcode < 0) + return errcode; + + if (errcode) + return 0; + } + + return process_async_disabled_event(decoder, insn); + } + + return 0; + + case ptev_async_paging: + if (ev->ip_suppressed || + ev->variant.async_paging.ip == decoder->ip) + return process_paging_event(decoder); + + return 0; + + case ptev_async_vmcs: + if (ev->ip_suppressed || + ev->variant.async_vmcs.ip == decoder->ip) + return process_vmcs_event(decoder); + + return 0; + + case ptev_disabled: + return 0; + + case ptev_paging: + if (!decoder->enabled) + return process_paging_event(decoder); + + return 0; + + case ptev_vmcs: + if (!decoder->enabled) + return process_vmcs_event(decoder); + + return 0; + + case ptev_overflow: + return process_overflow_event(decoder, insn); + + case ptev_exec_mode: + if (ev->ip_suppressed || + ev->variant.exec_mode.ip == decoder->ip) + return process_exec_mode_event(decoder); + + return 0; + + case ptev_tsx: + /* We would normally process the tsx event when peeking + * at the next instruction in order to indicate commits + * and aborts properly. + * This is to catch the case where we just sync'ed. + */ + if (ev->ip_suppressed || + ev->variant.tsx.ip == decoder->ip) + return process_tsx_event(decoder, NULL); + + return 0; + + case ptev_stop: + /* We would normally process the stop event when peeking at + * the next instruction in order to indicate the stop + * properly. + * This is to catch the case where we stop before we actually + * started. + */ + return process_stop_event(decoder, NULL); + } + + /* Diagnose an unknown event. */ + return -pte_internal; +} + +static int process_events_before(struct pt_insn_decoder *decoder, + struct pt_insn *insn) +{ + if (!decoder || !insn) + return -pte_internal; + + for (;;) { + int pending, processed; + + pending = event_pending(decoder); + if (pending < 0) + return pending; + + if (!pending) + break; + + processed = process_one_event_before(decoder, insn); + if (processed < 0) + return processed; + + if (!processed) + break; + + decoder->process_event = 0; + } + + return 0; +} + +static int process_one_event_after(struct pt_insn_decoder *decoder, + struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + struct pt_event *ev; + + if (!decoder) + return -pte_internal; + + ev = &decoder->event; + switch (ev->type) { + case ptev_enabled: + case ptev_overflow: + case ptev_async_paging: + case ptev_async_vmcs: + case ptev_async_disabled: + case ptev_async_branch: + case ptev_exec_mode: + case ptev_tsx: + case ptev_stop: + /* We will process those events on the next iteration. */ + return 0; + + case ptev_disabled: + if (ev->ip_suppressed) { + if (pt_insn_is_far_branch(insn, iext) || + pt_insn_changes_cpl(insn, iext) || + pt_insn_changes_cr3(insn, iext)) + return process_sync_disabled_event(decoder, + insn, iext); + + } else { + switch (insn->iclass) { + case ptic_other: + break; + + case ptic_call: + case ptic_jump: + /* If we got an IP with the disabled event, we + * may ignore direct branches that go to a + * different IP. + */ + if (iext->variant.branch.is_direct) { + uint64_t ip; + + ip = insn->ip; + ip += insn->size; + ip += iext->variant.branch.displacement; + + if (ip != ev->variant.disabled.ip) + break; + } + + /* Fall through. 
*/ + case ptic_return: + case ptic_far_call: + case ptic_far_return: + case ptic_far_jump: + case ptic_cond_jump: + return process_sync_disabled_event(decoder, + insn, iext); + + case ptic_error: + return -pte_bad_insn; + } + } + + return 0; + + case ptev_paging: + if (pt_insn_binds_to_pip(insn, iext) && + !decoder->paging_event_bound) { + /* Each instruction only binds to one paging event. */ + decoder->paging_event_bound = 1; + + return process_paging_event(decoder); + } + + return 0; + + case ptev_vmcs: + if (pt_insn_binds_to_vmcs(insn, iext) && + !decoder->vmcs_event_bound) { + /* Each instruction only binds to one vmcs event. */ + decoder->vmcs_event_bound = 1; + + return process_vmcs_event(decoder); + } + + return 0; + } + + return -pte_internal; +} + +static int process_events_after(struct pt_insn_decoder *decoder, + struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + int pending, processed, errcode; + + if (!decoder || !insn) + return -pte_internal; + + pending = event_pending(decoder); + if (pending <= 0) + return pending; + + decoder->paging_event_bound = 0; + decoder->vmcs_event_bound = 0; + + for (;;) { + processed = process_one_event_after(decoder, insn, iext); + if (processed < 0) + return processed; + + if (!processed) + return 0; + + decoder->process_event = 0; + + errcode = process_events_before(decoder, insn); + if (errcode < 0) + return errcode; + + pending = event_pending(decoder); + if (pending <= 0) + return pending; + } +} + +enum { + /* The maximum number of steps to take when determining whether the + * event location can be reached. + */ + bdm64_max_steps = 0x100 +}; + +/* Try to work around erratum BDM64. + * + * If we got a transaction abort immediately following a branch that produced + * trace, the trace for that branch might have been corrupted. + * + * Returns a positive integer if the erratum was handled. + * Returns zero if the erratum does not seem to apply. + * Returns a negative error code otherwise. + */ +static int handle_erratum_bdm64(struct pt_insn_decoder *decoder, + const struct pt_event *ev, + const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + int status; + + if (!decoder || !ev || !insn || !iext) + return -pte_internal; + + /* This only affects aborts. */ + if (!ev->variant.tsx.aborted) + return 0; + + /* This only affects branches. */ + if (!pt_insn_is_branch(insn, iext)) + return 0; + + /* Let's check if we can reach the event location from here. + * + * If we can, let's assume the erratum did not hit. We might still be + * wrong but we're not able to tell. + */ + status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip, + decoder->mode, decoder->image, + &decoder->asid, bdm64_max_steps); + if (status > 0) + return 0; + + /* We can't reach the event location. This could either mean that we + * stopped too early (and status is zero) or that the erratum hit. + * + * We assume the latter and pretend that the previous branch brought us + * to the event location, instead. 
+ */ + decoder->ip = ev->variant.tsx.ip; + + return 1; +} + +static int process_one_event_peek(struct pt_insn_decoder *decoder, + struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + struct pt_event *ev; + + if (!decoder) + return -pte_internal; + + ev = &decoder->event; + switch (ev->type) { + case ptev_async_disabled: + if (ev->variant.async_disabled.at == decoder->ip) { + if (decoder->query.config.errata.skd022) { + int errcode; + + errcode = handle_erratum_skd022(decoder); + if (errcode < 0) + return errcode; + + if (errcode) + return 0; + } + + return process_async_disabled_event(decoder, insn); + } + + return 0; + + case ptev_tsx: + if (decoder->query.config.errata.bdm64) { + int errcode; + + errcode = handle_erratum_bdm64(decoder, ev, insn, iext); + if (errcode < 0) + return errcode; + } + + if (ev->ip_suppressed || + ev->variant.tsx.ip == decoder->ip) + return process_tsx_event(decoder, insn); + + return 0; + + case ptev_async_branch: + /* We indicate the interrupt in the preceding instruction. + */ + if (ev->variant.async_branch.from == decoder->ip) { + insn->interrupted = 1; + + return process_async_branch_event(decoder); + } + + return 0; + + case ptev_enabled: + case ptev_overflow: + case ptev_disabled: + case ptev_paging: + case ptev_vmcs: + return 0; + + case ptev_exec_mode: + /* We would normally process this event in the next iteration. + * + * We process it here, as well, in case we have a peek event + * hiding behind. + */ + if (ev->ip_suppressed || + ev->variant.exec_mode.ip == decoder->ip) + return process_exec_mode_event(decoder); + + return 0; + + case ptev_async_paging: + /* We would normally process this event in the next iteration. + * + * We process it here, as well, in case we have a peek event + * hiding behind. + */ + if (ev->ip_suppressed || + ev->variant.async_paging.ip == decoder->ip) + return process_paging_event(decoder); + + return 0; + + case ptev_async_vmcs: + /* We would normally process this event in the next iteration. + * + * We process it here, as well, in case we have a peek event + * hiding behind. + */ + if (ev->ip_suppressed || + ev->variant.async_vmcs.ip == decoder->ip) + return process_vmcs_event(decoder); + + return 0; + + case ptev_stop: + return process_stop_event(decoder, insn); + } + + return -pte_internal; +} + +static int process_events_peek(struct pt_insn_decoder *decoder, + struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + if (!decoder || !insn) + return -pte_internal; + + for (;;) { + int pending, processed; + + pending = event_pending(decoder); + if (pending < 0) + return pending; + + if (!pending) + break; + + processed = process_one_event_peek(decoder, insn, iext); + if (processed < 0) + return processed; + + if (!processed) + break; + + decoder->process_event = 0; + } + + return 0; +} + +static int proceed(struct pt_insn_decoder *decoder, const struct pt_insn *insn, + const struct pt_insn_ext *iext) +{ + if (!decoder || !insn || !iext) + return -pte_internal; + + /* Branch displacements apply to the next instruction. */ + decoder->ip += insn->size; + + /* We handle non-branches, non-taken conditional branches, and + * compressed returns directly in the switch and do some pre-work for + * calls. + * + * All kinds of branches are handled below the switch. 
+	 */
+	switch (insn->iclass) {
+	case ptic_other:
+		return 0;
+
+	case ptic_cond_jump: {
+		int status, taken;
+
+		status = pt_qry_cond_branch(&decoder->query, &taken);
+		if (status < 0)
+			return status;
+
+		decoder->status = status;
+		if (!taken)
+			return 0;
+
+		break;
+	}
+
+	case ptic_call:
+		/* Log the call for return compression.
+		 *
+		 * Unless this is a call to the next instruction as is used
+		 * for position independent code.
+		 */
+		if (iext->variant.branch.displacement ||
+		    !iext->variant.branch.is_direct)
+			pt_retstack_push(&decoder->retstack, decoder->ip);
+
+		break;
+
+	case ptic_return: {
+		int taken, status;
+
+		/* Check for a compressed return. */
+		status = pt_qry_cond_branch(&decoder->query, &taken);
+		if (status >= 0) {
+			decoder->status = status;
+
+			/* A compressed return is indicated by a taken
+			 * conditional branch.
+			 */
+			if (!taken)
+				return -pte_bad_retcomp;
+
+			return pt_retstack_pop(&decoder->retstack,
+					       &decoder->ip);
+		}
+
+		break;
+	}
+
+	case ptic_jump:
+	case ptic_far_call:
+	case ptic_far_return:
+	case ptic_far_jump:
+		break;
+
+	case ptic_error:
+		return -pte_bad_insn;
+	}
+
+	/* Process a direct or indirect branch.
+	 *
+	 * This combines calls, uncompressed returns, taken conditional jumps,
+	 * and all flavors of far transfers.
+	 */
+	if (iext->variant.branch.is_direct)
+		decoder->ip += iext->variant.branch.displacement;
+	else {
+		int status;
+
+		status = pt_qry_indirect_branch(&decoder->query,
+						&decoder->ip);
+
+		if (status < 0)
+			return status;
+
+		decoder->status = status;
+
+		/* We do need an IP to proceed. */
+		if (status & pts_ip_suppressed)
+			return -pte_noip;
+	}
+
+	return 0;
+}
+
+static int pt_insn_status(const struct pt_insn_decoder *decoder)
+{
+	int status, flags;
+
+	if (!decoder)
+		return -pte_internal;
+
+	status = decoder->status;
+	flags = 0;
+
+	/* Forward end-of-trace indications.
+	 *
+	 * Postpone it as long as we're still processing events, though.
+	 */
+	if ((status & pts_eos) && !decoder->process_event)
+		flags |= pts_eos;
+
+	return flags;
+}
+
+static inline int insn_to_user(struct pt_insn *uinsn, size_t size,
+			       const struct pt_insn *insn)
+{
+	if (!uinsn || !insn)
+		return -pte_internal;
+
+	if (uinsn == insn)
+		return 0;
+
+	/* Zero out any unknown bytes. */
+	if (sizeof(*insn) < size) {
+		memset((uint8_t *) uinsn + sizeof(*insn), 0,
+		       size - sizeof(*insn));
+
+		size = sizeof(*insn);
+	}
+
+	memcpy(uinsn, insn, size);
+
+	return 0;
+}
+
+int pt_insn_next(struct pt_insn_decoder *decoder, struct pt_insn *uinsn,
+		 size_t size)
+{
+	struct pt_insn_ext iext;
+	struct pt_insn insn, *pinsn;
+	int errcode;
+
+	if (!uinsn || !decoder)
+		return -pte_invalid;
+
+	pinsn = size == sizeof(insn) ? uinsn : &insn;
+
+	/* Zero-initialize the instruction in case of error returns. */
+	memset(pinsn, 0, sizeof(*pinsn));
+
+	/* We process events three times:
+	 * - once based on the current IP.
+	 * - once based on the instruction at that IP.
+	 * - once based on the next IP.
+	 *
+	 * Between the first and second round of event processing, we decode
+	 * the instruction and fill in @insn.
+	 *
+	 * This is necessary to attribute events to the correct instruction.
+	 */
+	errcode = process_events_before(decoder, pinsn);
+	if (errcode < 0)
+		goto err;
+
+	/* If tracing is disabled at this point, we should be at the end
+	 * of the trace - otherwise there should have been a re-enable
+	 * event.
+	 */
+	if (!decoder->enabled) {
+		struct pt_event event;
+
+		/* Any query should give us an end-of-stream error.
*/ + errcode = pt_qry_event(&decoder->query, &event, sizeof(event)); + if (errcode != -pte_eos) + errcode = -pte_no_enable; + + goto err; + } + + /* Decode the current instruction. */ + if (decoder->speculative) + pinsn->speculative = 1; + pinsn->ip = decoder->ip; + pinsn->mode = decoder->mode; + + errcode = pt_insn_decode(pinsn, &iext, decoder->image, &decoder->asid); + if (errcode < 0) + goto err; + + /* After decoding the instruction, we must not change the IP in this + * iteration - postpone processing of events that would to the next + * iteration. + */ + decoder->event_may_change_ip = 0; + + errcode = process_events_after(decoder, pinsn, &iext); + if (errcode < 0) + goto err; + + /* If event processing disabled tracing, we're done for this + * iteration - we will process the re-enable event on the next. + * + * Otherwise, we determine the next instruction and peek ahead. + * + * This may indicate an event already in this instruction. + */ + if (decoder->enabled) { + /* Proceed errors are signaled one instruction too early. */ + errcode = proceed(decoder, pinsn, &iext); + if (errcode < 0) + goto err; + + /* Peek errors are ignored. We will run into them again + * in the next iteration. + */ + (void) process_events_peek(decoder, pinsn, &iext); + } + + errcode = insn_to_user(uinsn, size, pinsn); + if (errcode < 0) + return errcode; + + /* We're done with this instruction. Now we may change the IP again. */ + decoder->event_may_change_ip = 1; + + return pt_insn_status(decoder); + +err: + /* We provide the (incomplete) instruction also in case of errors. + * + * For decode or post-decode event-processing errors, the IP or + * other fields are already valid and may help diagnose the error. + */ + (void) insn_to_user(uinsn, size, pinsn); + + return errcode; +} diff --git a/libipt/src/pt_last_ip.c b/libipt/src/pt_last_ip.c new file mode 100644 index 0000000..366b8b0 --- /dev/null +++ b/libipt/src/pt_last_ip.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_last_ip.h" + +#include "intel-pt.h" + + +void pt_last_ip_init(struct pt_last_ip *last_ip) +{ + if (!last_ip) + return; + + last_ip->ip = 0ull; + last_ip->have_ip = 0; + last_ip->suppressed = 0; +} + +int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip) +{ + if (!last_ip) + return -pte_invalid; + + if (!last_ip->have_ip) { + if (ip) + *ip = 0ull; + return -pte_noip; + } + + if (last_ip->suppressed) { + if (ip) + *ip = 0ull; + return -pte_ip_suppressed; + } + + if (ip) + *ip = last_ip->ip; + + return 0; +} + +/* Sign-extend a uint64_t value. */ +static uint64_t sext(uint64_t val, uint8_t sign) +{ + uint64_t signbit, mask; + + signbit = 1ull << (sign - 1); + mask = ~0ull << sign; + + return val & signbit ? val | mask : val & ~mask; +} + +int pt_last_ip_update_ip(struct pt_last_ip *last_ip, + const struct pt_packet_ip *packet, + const struct pt_config *config) +{ + (void) config; + + if (!last_ip || !packet) + return -pte_invalid; + + switch (packet->ipc) { + case pt_ipc_suppressed: + last_ip->suppressed = 1; + return 0; + + case pt_ipc_sext_48: + last_ip->ip = sext(packet->ip, 48); + last_ip->have_ip = 1; + last_ip->suppressed = 0; + return 0; + + case pt_ipc_update_16: + last_ip->ip = (last_ip->ip & ~0xffffull) + | (packet->ip & 0xffffull); + last_ip->have_ip = 1; + last_ip->suppressed = 0; + return 0; + + case pt_ipc_update_32: + last_ip->ip = (last_ip->ip & ~0xffffffffull) + | (packet->ip & 0xffffffffull); + last_ip->have_ip = 1; + last_ip->suppressed = 0; + return 0; + + case pt_ipc_update_48: + last_ip->ip = (last_ip->ip & ~0xffffffffffffull) + | (packet->ip & 0xffffffffffffull); + last_ip->have_ip = 1; + last_ip->suppressed = 0; + return 0; + + case pt_ipc_full: + last_ip->ip = packet->ip; + last_ip->have_ip = 1; + last_ip->suppressed = 0; + return 0; + } + + return -pte_bad_packet; +} diff --git a/libipt/src/pt_packet.c b/libipt/src/pt_packet.c new file mode 100644 index 0000000..80d18ad --- /dev/null +++ b/libipt/src/pt_packet.c @@ -0,0 +1,460 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_packet.h"
+
+#include "intel-pt.h"
+
+#include <limits.h>
+
+
+static uint64_t pt_pkt_read_value(const uint8_t *pos, int size)
+{
+	uint64_t val;
+	int idx;
+
+	for (val = 0, idx = 0; idx < size; ++idx) {
+		uint64_t byte = *pos++;
+
+		byte <<= (idx * 8);
+		val |= byte;
+	}
+
+	return val;
+}
+
+int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos,
+			const struct pt_config *config)
+{
+	int (*decode)(struct pt_packet_unknown *, const struct pt_config *,
+		      const uint8_t *, void *);
+	int size;
+
+	if (!packet || !pos || !config)
+		return -pte_internal;
+
+	decode = config->decode.callback;
+	if (!decode)
+		return -pte_bad_opc;
+
+	/* Fill in some default values. */
+	packet->payload.unknown.packet = pos;
+	packet->payload.unknown.priv = NULL;
+
+	/* We accept a size of zero to allow the callback to modify the
+	 * trace buffer and resume normal decoding.
+	 */
+	size = (*decode)(&packet->payload.unknown, config, pos,
+			 config->decode.context);
+	if (size < 0)
+		return size;
+
+	if (size > UCHAR_MAX)
+		return -pte_invalid;
+
+	packet->type = ppt_unknown;
+	packet->size = (uint8_t) size;
+
+	if (config->end < pos + size)
+		return -pte_eos;
+
+	return size;
+}
+
+int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config)
+{
+	int count;
+
+	if (!pos || !config)
+		return -pte_internal;
+
+	if (config->end < pos + ptps_psb)
+		return -pte_eos;
+
+	pos += pt_opcs_psb;
+
+	for (count = 0; count < pt_psb_repeat_count; ++count) {
+		if (*pos++ != pt_psb_hi)
+			return -pte_bad_packet;
+		if (*pos++ != pt_psb_lo)
+			return -pte_bad_packet;
+	}
+
+	return ptps_psb;
+}
+
+static int pt_pkt_ip_size(enum pt_ip_compression ipc)
+{
+	switch (ipc) {
+	case pt_ipc_suppressed:
+		return 0;
+
+	case pt_ipc_update_16:
+		return 2;
+
+	case pt_ipc_update_32:
+		return 4;
+
+	case pt_ipc_update_48:
+	case pt_ipc_sext_48:
+		return 6;
+
+	case pt_ipc_full:
+		return 8;
+	}
+
+	return -pte_bad_packet;
+}
+
+int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos,
+		   const struct pt_config *config)
+{
+	uint64_t ip;
+	uint8_t ipc;
+	int ipsize;
+
+	if (!packet || !pos || !config)
+		return -pte_internal;
+
+	ipc = (*pos++ >> pt_opm_ipc_shr) & pt_opm_ipc_shr_mask;
+
+	ip = 0ull;
+	ipsize = pt_pkt_ip_size((enum pt_ip_compression) ipc);
+	if (ipsize < 0)
+		return ipsize;
+
+	if (config->end < pos + ipsize)
+		return -pte_eos;
+
+	if (ipsize)
+		ip = pt_pkt_read_value(pos, ipsize);
+
+	packet->ipc = (enum pt_ip_compression) ipc;
+	packet->ip = ip;
+
+	return ipsize + 1;
+}
+
+static uint8_t pt_pkt_tnt_bit_size(uint64_t payload)
+{
+	uint8_t size;
+
+	/* The payload bit-size is the bit-index of the payload's stop-bit,
+	 * which itself is not part of the payload proper.
+ */ + for (size = 0; ; size += 1) { + payload >>= 1; + if (!payload) + break; + } + + return size; +} + +static int pt_pkt_read_tnt(struct pt_packet_tnt *packet, uint64_t payload) +{ + uint8_t bit_size; + + if (!packet) + return -pte_internal; + + bit_size = pt_pkt_tnt_bit_size(payload); + if (!bit_size) + return -pte_bad_packet; + + /* Remove the stop bit from the payload. */ + payload &= ~(1ull << bit_size); + + packet->payload = payload; + packet->bit_size = bit_size; + + return 0; +} + +int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos, + const struct pt_config *config) +{ + int errcode; + + (void) config; + + if (!pos) + return -pte_internal; + + errcode = pt_pkt_read_tnt(packet, pos[0] >> pt_opm_tnt_8_shr); + if (errcode < 0) + return errcode; + + return ptps_tnt_8; +} + +int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos, + const struct pt_config *config) +{ + uint64_t payload; + int errcode; + + if (!pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_tnt_64) + return -pte_eos; + + payload = pt_pkt_read_value(pos + pt_opcs_tnt_64, pt_pl_tnt_64_size); + + errcode = pt_pkt_read_tnt(packet, payload); + if (errcode < 0) + return errcode; + + return ptps_tnt_64; +} + +int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos, + const struct pt_config *config) +{ + uint64_t payload; + + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_pip) + return -pte_eos; + + /* Read the payload. */ + payload = pt_pkt_read_value(pos + pt_opcs_pip, pt_pl_pip_size); + + /* Extract the non-root information from the payload. */ + packet->nr = payload & pt_pl_pip_nr; + + /* Create the cr3 value. */ + payload >>= pt_pl_pip_shr; + payload <<= pt_pl_pip_shl; + packet->cr3 = payload; + + return ptps_pip; +} + +static int pt_pkt_read_mode_exec(struct pt_packet_mode_exec *packet, + uint8_t mode) +{ + if (!packet) + return -pte_internal; + + packet->csl = (mode & pt_mob_exec_csl) != 0; + packet->csd = (mode & pt_mob_exec_csd) != 0; + + return ptps_mode; +} + +static int pt_pkt_read_mode_tsx(struct pt_packet_mode_tsx *packet, + uint8_t mode) +{ + if (!packet) + return -pte_internal; + + packet->intx = (mode & pt_mob_tsx_intx) != 0; + packet->abrt = (mode & pt_mob_tsx_abrt) != 0; + + return ptps_mode; +} + +int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos, + const struct pt_config *config) +{ + uint8_t payload, mode, leaf; + + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_mode) + return -pte_eos; + + payload = pos[pt_opcs_mode]; + leaf = payload & pt_mom_leaf; + mode = payload & pt_mom_bits; + + packet->leaf = (enum pt_mode_leaf) leaf; + switch (leaf) { + default: + return -pte_bad_packet; + + case pt_mol_exec: + return pt_pkt_read_mode_exec(&packet->bits.exec, mode); + + case pt_mol_tsx: + return pt_pkt_read_mode_tsx(&packet->bits.tsx, mode); + } +} + +int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos, + const struct pt_config *config) +{ + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_tsc) + return -pte_eos; + + packet->tsc = pt_pkt_read_value(pos + pt_opcs_tsc, pt_pl_tsc_size); + + return ptps_tsc; +} + +int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos, + const struct pt_config *config) +{ + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_cbr) + return -pte_eos; + + packet->ratio = pos[2]; + + return ptps_cbr; +} + 
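+/* For illustration: pt_pkt_read_value() above accumulates payload bytes + * least-significant first, so reading the two bytes { 0x34, 0x12 } yields + * the value 0x1234. Several of the readers below use it to assemble their + * multi-byte payloads. + */ +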
+int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos, + const struct pt_config *config) +{ + uint16_t ctc, fc; + + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_tma) + return -pte_eos; + + ctc = pos[pt_pl_tma_ctc_0]; + ctc |= pos[pt_pl_tma_ctc_1] << 8; + + fc = pos[pt_pl_tma_fc_0]; + fc |= pos[pt_pl_tma_fc_1] << 8; + + if (fc & ~pt_pl_tma_fc_mask) + return -pte_bad_packet; + + packet->ctc = ctc; + packet->fc = fc; + + return ptps_tma; +} + +int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos, + const struct pt_config *config) +{ + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_mtc) + return -pte_eos; + + packet->ctc = pos[pt_opcs_mtc]; + + return ptps_mtc; +} + +int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + uint64_t value; + uint8_t cyc, ext, shl; + + if (!packet || !pos || !config) + return -pte_internal; + + begin = pos; + end = config->end; + + /* The first byte contains the opcode and part of the payload. + * We already checked that this first byte is within bounds. + */ + cyc = *pos++; + + ext = cyc & pt_opm_cyc_ext; + cyc >>= pt_opm_cyc_shr; + + value = cyc; + shl = (8 - pt_opm_cyc_shr); + + while (ext) { + uint64_t bits; + + if (end <= pos) + return -pte_eos; + + bits = *pos++; + ext = bits & pt_opm_cycx_ext; + + bits >>= pt_opm_cycx_shr; + bits <<= shl; + + shl += (8 - pt_opm_cycx_shr); + if (sizeof(value) * 8 < shl) + return -pte_bad_packet; + + value |= bits; + } + + packet->value = value; + + return (int) (pos - begin); +} + +int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos, + const struct pt_config *config) +{ + uint64_t payload; + + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_vmcs) + return -pte_eos; + + payload = pt_pkt_read_value(pos + pt_opcs_vmcs, pt_pl_vmcs_size); + + packet->base = payload << pt_pl_vmcs_shl; + + return ptps_vmcs; +} + +int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos, + const struct pt_config *config) +{ + if (!packet || !pos || !config) + return -pte_internal; + + if (config->end < pos + ptps_mnt) + return -pte_eos; + + packet->payload = pt_pkt_read_value(pos + pt_opcs_mnt, pt_pl_mnt_size); + + return ptps_mnt; +} diff --git a/libipt/src/pt_packet_decoder.c b/libipt/src/pt_packet_decoder.c new file mode 100644 index 0000000..a7cfed5 --- /dev/null +++ b/libipt/src/pt_packet_decoder.c @@ -0,0 +1,626 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_packet_decoder.h" +#include "pt_decoder_function.h" +#include "pt_packet.h" +#include "pt_sync.h" +#include "pt_config.h" + +#include <string.h> +#include <stdlib.h> + + +int pt_pkt_decoder_init(struct pt_packet_decoder *decoder, + const struct pt_config *config) +{ + int errcode; + + if (!decoder || !config) + return -pte_invalid; + + memset(decoder, 0, sizeof(*decoder)); + + errcode = pt_config_from_user(&decoder->config, config); + if (errcode < 0) + return errcode; + + return 0; +} + +struct pt_packet_decoder *pt_pkt_alloc_decoder(const struct pt_config *config) +{ + struct pt_packet_decoder *decoder; + int errcode; + + decoder = malloc(sizeof(*decoder)); + if (!decoder) + return NULL; + + errcode = pt_pkt_decoder_init(decoder, config); + if (errcode < 0) { + free(decoder); + return NULL; + } + + return decoder; +} + +void pt_pkt_decoder_fini(struct pt_packet_decoder *decoder) +{ + (void) decoder; + + /* Nothing to do. */ +} + +void pt_pkt_free_decoder(struct pt_packet_decoder *decoder) +{ + pt_pkt_decoder_fini(decoder); + free(decoder); +} + +int pt_pkt_sync_forward(struct pt_packet_decoder *decoder) +{ + const uint8_t *pos, *sync; + int errcode; + + if (!decoder) + return -pte_invalid; + + sync = decoder->sync; + pos = decoder->pos; + if (!pos) + pos = decoder->config.begin; + + if (pos == sync) + pos += ptps_psb; + + errcode = pt_sync_forward(&sync, pos, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->sync = sync; + decoder->pos = sync; + + return 0; +} + +int pt_pkt_sync_backward(struct pt_packet_decoder *decoder) +{ + const uint8_t *pos, *sync; + int errcode; + + if (!decoder) + return -pte_invalid; + + pos = decoder->pos; + if (!pos) + pos = decoder->config.end; + + errcode = pt_sync_backward(&sync, pos, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->sync = sync; + decoder->pos = sync; + + return 0; +} + +int pt_pkt_sync_set(struct pt_packet_decoder *decoder, uint64_t offset) +{ + const uint8_t *begin, *end, *pos; + + if (!decoder) + return -pte_invalid; + + begin = decoder->config.begin; + end = decoder->config.end; + pos = begin + offset; + + if (end < pos || pos < begin) + return -pte_eos; + + decoder->sync = pos; + decoder->pos = pos; + + return 0; +} + +int pt_pkt_get_offset(struct pt_packet_decoder *decoder, uint64_t *offset) +{ + const uint8_t *begin, *pos; + + if (!decoder || !offset) + return -pte_invalid; + + begin = decoder->config.begin; + pos = decoder->pos; + + if (!pos) + return -pte_nosync; + + *offset = pos - begin; + return 0; +} + +int pt_pkt_get_sync_offset(struct pt_packet_decoder *decoder, uint64_t *offset) +{ + const uint8_t *begin, *sync; + + if (!decoder || !offset) + return -pte_invalid; + + begin = decoder->config.begin; +
sync = decoder->sync; + + if (!sync) + return -pte_nosync; + + *offset = sync - begin; + return 0; +} + +const struct pt_config * +pt_pkt_get_config(const struct pt_packet_decoder *decoder) +{ + if (!decoder) + return NULL; + + return &decoder->config; +} + +static inline int pkt_to_user(struct pt_packet *upkt, size_t size, + const struct pt_packet *pkt) +{ + if (!upkt || !pkt) + return -pte_internal; + + if (upkt == pkt) + return 0; + + /* Zero out any unknown bytes. */ + if (sizeof(*pkt) < size) { + memset(((uint8_t *) upkt) + sizeof(*pkt), 0, size - sizeof(*pkt)); + + size = sizeof(*pkt); + } + + memcpy(upkt, pkt, size); + + return 0; +} + +int pt_pkt_next(struct pt_packet_decoder *decoder, struct pt_packet *packet, + size_t psize) +{ + const struct pt_decoder_function *dfun; + struct pt_packet pkt, *ppkt; + int errcode, size; + + if (!packet || !decoder) + return -pte_invalid; + + ppkt = psize == sizeof(pkt) ? packet : &pkt; + + errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config); + if (errcode < 0) + return errcode; + + if (!dfun) + return -pte_internal; + + if (!dfun->packet) + return -pte_internal; + + size = dfun->packet(decoder, ppkt); + if (size < 0) + return size; + + errcode = pkt_to_user(packet, psize, ppkt); + if (errcode < 0) + return errcode; + + decoder->pos += size; + + return size; +} + +int pt_pkt_decode_unknown(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder) + return -pte_internal; + + size = pt_pkt_read_unknown(packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + return size; +} + +int pt_pkt_decode_pad(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + (void) decoder; + + if (!packet) + return -pte_internal; + + packet->type = ppt_pad; + packet->size = ptps_pad; + + return ptps_pad; +} + +int pt_pkt_decode_psb(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder) + return -pte_internal; + + size = pt_pkt_read_psb(decoder->pos, &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_psb; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_tip(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tip; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_tnt_8(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_tnt_8(&packet->payload.tnt, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tnt_8; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_tnt_64(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_tnt_64(&packet->payload.tnt, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tnt_64; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_tip_pge(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tip_pge; + packet->size = (uint8_t) size; + +
return size; +} + +int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tip_pgd; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_fup(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_fup; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_pip(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_pip(&packet->payload.pip, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_pip; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_ovf(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + (void) decoder; + + if (!packet) + return -pte_internal; + + packet->type = ppt_ovf; + packet->size = ptps_ovf; + + return ptps_ovf; +} + +int pt_pkt_decode_mode(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_mode(&packet->payload.mode, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_mode; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_psbend(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + (void) decoder; + + if (!packet) + return -pte_internal; + + packet->type = ppt_psbend; + packet->size = ptps_psbend; + + return ptps_psbend; +} + +int pt_pkt_decode_tsc(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_tsc(&packet->payload.tsc, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tsc; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_cbr(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_cbr(&packet->payload.cbr, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_cbr; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_tma(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_tma(&packet->payload.tma, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_tma; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_mtc(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_mtc(&packet->payload.mtc, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_mtc; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_cyc(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_cyc(&packet->payload.cyc, decoder->pos, + &decoder->config); + if 
(size < 0) + return size; + + packet->type = ppt_cyc; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_stop(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + (void) decoder; + + if (!packet) + return -pte_internal; + + packet->type = ppt_stop; + packet->size = ptps_stop; + + return ptps_stop; +} + +int pt_pkt_decode_vmcs(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_vmcs(&packet->payload.vmcs, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_vmcs; + packet->size = (uint8_t) size; + + return size; +} + +int pt_pkt_decode_mnt(struct pt_packet_decoder *decoder, + struct pt_packet *packet) +{ + int size; + + if (!decoder || !packet) + return -pte_internal; + + size = pt_pkt_read_mnt(&packet->payload.mnt, decoder->pos, + &decoder->config); + if (size < 0) + return size; + + packet->type = ppt_mnt; + packet->size = (uint8_t) size; + + return size; +} diff --git a/libipt/src/pt_query_decoder.c b/libipt/src/pt_query_decoder.c new file mode 100644 index 0000000..100fc8c --- /dev/null +++ b/libipt/src/pt_query_decoder.c @@ -0,0 +1,2525 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_query_decoder.h" +#include "pt_sync.h" +#include "pt_decoder_function.h" +#include "pt_packet.h" +#include "pt_packet_decoder.h" +#include "pt_config.h" + +#include "intel-pt.h" + +#include <string.h> +#include <stddef.h> +#include <stdlib.h> + + +/* Find a FUP in a PSB+ header. + * + * The packet @decoder must be synchronized onto the trace stream at the + * beginning or somewhere inside a PSB+ header. + * + * It uses @packet to hold trace packets during its search. If the search is + * successful, @packet will contain the first (and hopefully only) FUP packet in + * this PSB+. Otherwise, @packet may contain anything. + * + * Returns one if a FUP packet is found (@packet will contain it). + * Returns zero if no FUP packet is found (@packet is undefined).
+ * Returns a negative error code otherwise. + */ +static int pt_qry_find_header_fup(struct pt_packet *packet, + struct pt_packet_decoder *decoder) +{ + if (!packet || !decoder) + return -pte_internal; + + for (;;) { + int errcode; + + errcode = pt_pkt_next(decoder, packet, sizeof(*packet)); + if (errcode < 0) + return errcode; + + switch (packet->type) { + default: + /* Ignore the packet. */ + break; + + case ppt_psbend: + /* There's no FUP in here. */ + return 0; + + case ppt_fup: + /* Found it. */ + return 1; + } + } +} + +int pt_qry_decoder_init(struct pt_query_decoder *decoder, + const struct pt_config *config) +{ + int errcode; + + if (!decoder) + return -pte_invalid; + + memset(decoder, 0, sizeof(*decoder)); + + errcode = pt_config_from_user(&decoder->config, config); + if (errcode < 0) + return errcode; + + pt_last_ip_init(&decoder->ip); + pt_tnt_cache_init(&decoder->tnt); + pt_time_init(&decoder->time); + pt_tcal_init(&decoder->tcal); + pt_evq_init(&decoder->evq); + + return 0; +} + +struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config) +{ + struct pt_query_decoder *decoder; + int errcode; + + decoder = malloc(sizeof(*decoder)); + if (!decoder) + return NULL; + + errcode = pt_qry_decoder_init(decoder, config); + if (errcode < 0) { + free(decoder); + return NULL; + } + + return decoder; +} + +void pt_qry_decoder_fini(struct pt_query_decoder *decoder) +{ + (void) decoder; + + /* Nothing to do. */ +} + +void pt_qry_free_decoder(struct pt_query_decoder *decoder) +{ + pt_qry_decoder_fini(decoder); + free(decoder); +} + +static void pt_qry_reset(struct pt_query_decoder *decoder) +{ + if (!decoder) + return; + + decoder->enabled = 0; + decoder->consume_packet = 0; + decoder->event = NULL; + + pt_last_ip_init(&decoder->ip); + pt_tnt_cache_init(&decoder->tnt); + pt_time_init(&decoder->time); + pt_tcal_init(&decoder->tcal); + pt_evq_init(&decoder->evq); +} + +static int pt_qry_will_event(const struct pt_query_decoder *decoder) +{ + const struct pt_decoder_function *dfun; + + if (!decoder) + return -pte_internal; + + dfun = decoder->next; + if (!dfun) + return 0; + + if (dfun->flags & pdff_event) + return 1; + + if (dfun->flags & pdff_psbend) + return pt_evq_pending(&decoder->evq, evb_psbend); + + if (dfun->flags & pdff_tip) + return pt_evq_pending(&decoder->evq, evb_tip); + + if (dfun->flags & pdff_fup) + return pt_evq_pending(&decoder->evq, evb_fup); + + return 0; +} + +static int pt_qry_will_eos(const struct pt_query_decoder *decoder) +{ + const struct pt_decoder_function *dfun; + int errcode; + + if (!decoder) + return -pte_internal; + + dfun = decoder->next; + if (dfun) + return 0; + + /* The decoding function may be NULL for two reasons: + * + * - we ran out of trace + * - we ran into a fetch error such as -pte_bad_opc + * + * Let's fetch again. + */ + errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config); + return errcode == -pte_eos; +} + +static int pt_qry_status_flags(const struct pt_query_decoder *decoder) +{ + int flags = 0; + + if (!decoder) + return -pte_internal; + + /* Some packets force out TNT and any deferred TIPs in order to + * establish the correct context for the subsequent packet. + * + * Users are expected to first navigate to the correct code region + * by using up the cached TNT bits before interpreting any subsequent + * packets. + * + * We do need to read ahead in order to signal upcoming events. We may + * have already decoded those packets while our user has not navigated + * to the correct code region, yet. 
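+ * (For example: as long as TNT bits remain cached, pt_qry_cond_branch() reports its status without pts_event_pending; only once the cache has been drained does a query indicate the event we already read ahead to.)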
+ * + * In order to have our user use up the cached TNT bits first, we do + * not indicate the next event until the TNT cache is empty. + */ + if (pt_tnt_cache_is_empty(&decoder->tnt)) { + if (pt_qry_will_event(decoder)) + flags |= pts_event_pending; + + if (pt_qry_will_eos(decoder)) + flags |= pts_eos; + } + + return flags; +} + +static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder) +{ + const struct pt_decoder_function *dfun; + int errcode; + + if (!decoder) + return -pte_internal; + + /* Repeat the decoder fetch to reproduce the error. */ + errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config); + if (errcode < 0) + return errcode; + + /* We must get some error or something's wrong. */ + return -pte_internal; +} + +static int pt_qry_read_ahead(struct pt_query_decoder *decoder) +{ + for (;;) { + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_df_fetch(&decoder->next, decoder->pos, + &decoder->config); + if (errcode) + return errcode; + + dfun = decoder->next; + if (!dfun) + return -pte_internal; + + if (!dfun->decode) + return -pte_internal; + + /* We're done once we reach + * + * - a branching related packet. */ + if (dfun->flags & (pdff_tip | pdff_tnt)) + return 0; + + /* - an event related packet. */ + if (pt_qry_will_event(decoder)) + return 0; + + /* Decode status update packets. */ + errcode = dfun->decode(decoder); + if (errcode) + return errcode; + } +} + +static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos, + uint64_t *addr) +{ + const struct pt_decoder_function *dfun; + int status, errcode; + + if (!decoder || !pos) + return -pte_invalid; + + pt_qry_reset(decoder); + + decoder->sync = pos; + decoder->pos = pos; + + errcode = pt_df_fetch(&decoder->next, pos, &decoder->config); + if (errcode) + return errcode; + + dfun = decoder->next; + + /* We do need to start at a PSB in order to initialize the state. */ + if (dfun != &pt_decode_psb) + return -pte_nosync; + + /* Decode the PSB+ header to initialize the state. */ + errcode = dfun->decode(decoder); + if (errcode < 0) + return errcode; + + /* Fill in the start address. + * We do this before reading ahead since the latter may read an + * adjacent PSB+ that might change the decoder's IP, causing us + * to skip code. + */ + if (addr) { + status = pt_last_ip_query(addr, &decoder->ip); + + /* Make sure we don't clobber it later on. */ + if (!status) + addr = NULL; + } + + /* Read ahead until the first query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if (errcode < 0) + return errcode; + + /* We return the current decoder status. */ + status = pt_qry_status_flags(decoder); + if (status < 0) + return status; + + errcode = pt_last_ip_query(addr, &decoder->ip); + if (errcode < 0) { + /* Indicate the missing IP in the status. */ + if (addr) + status |= pts_ip_suppressed; + } + + return status; +} + +static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal, + const struct pt_packet_tsc *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_update_tsc(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. 
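+ * + * (Specifically, only -pte_bad_config is swallowed below; any other error code is returned to the caller.)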
+ */ + errcode = pt_time_update_tsc(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_header_tsc(struct pt_time *time, + struct pt_time_cal *tcal, + const struct pt_packet_tsc *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_header_tsc(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_tsc(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal, + const struct pt_packet_cbr *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_update_cbr(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_cbr(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_header_cbr(struct pt_time *time, + struct pt_time_cal *tcal, + const struct pt_packet_cbr *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_header_cbr(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_cbr(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal, + const struct pt_packet_tma *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_update_tma(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_tma(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal, + const struct pt_packet_mtc *packet, + const struct pt_config *config) +{ + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. 
+ */ + errcode = pt_tcal_update_mtc(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_mtc(time, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal, + const struct pt_packet_cyc *packet, + const struct pt_config *config) +{ + uint64_t fcr; + int errcode; + + /* We ignore configuration errors. They will result in imprecise + * calibration which will result in imprecise cycle-accurate timing. + * + * We currently do not track them. + */ + errcode = pt_tcal_update_cyc(tcal, packet, config); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + /* We need the FastCounter to Cycles ratio below. Fall back to + * an invalid ratio of 0 if calibration has not kicked in, yet. + * + * This will be tracked as packet loss in struct pt_time. + */ + errcode = pt_tcal_fcr(&fcr, tcal); + if (errcode < 0) { + if (errcode == -pte_no_time) + fcr = 0ull; + else + return errcode; + } + + /* We ignore configuration errors. They will result in imprecise + * timing and are tracked as packet losses in struct pt_time. + */ + errcode = pt_time_update_cyc(time, packet, config, fcr); + if (errcode < 0 && (errcode != -pte_bad_config)) + return errcode; + + return 0; +} + +int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip) +{ + const uint8_t *pos, *sync; + int errcode; + + if (!decoder) + return -pte_invalid; + + sync = decoder->sync; + pos = decoder->pos; + if (!pos) + pos = decoder->config.begin; + + if (pos == sync) + pos += ptps_psb; + + errcode = pt_sync_forward(&sync, pos, &decoder->config); + if (errcode < 0) + return errcode; + + return pt_qry_start(decoder, sync, ip); +} + +int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip) +{ + const uint8_t *start, *sync; + int errcode; + + if (!decoder) + return -pte_invalid; + + start = decoder->pos; + if (!start) + start = decoder->config.end; + + sync = start; + for (;;) { + errcode = pt_sync_backward(&sync, sync, &decoder->config); + if (errcode < 0) + return errcode; + + errcode = pt_qry_start(decoder, sync, ip); + if (errcode < 0) { + /* Ignore incomplete trace segments at the end. We need + * a full PSB+ to start decoding. + */ + if (errcode == -pte_eos) + continue; + + return errcode; + } + + /* An empty trace segment in the middle of the trace might bring + * us back to where we started. + * + * We're done when we reached a new position. 
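+ * + * (In that case decoder->pos still equals the position we started from, and the loop below keeps searching for the next PSB further backward.)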
+ */ + if (decoder->pos != start) + break; + } + + return 0; +} + +int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip, + uint64_t offset) +{ + const uint8_t *sync, *pos; + int errcode; + + if (!decoder) + return -pte_invalid; + + pos = decoder->config.begin + offset; + + errcode = pt_sync_set(&sync, pos, &decoder->config); + if (errcode < 0) + return errcode; + + return pt_qry_start(decoder, sync, ip); +} + +int pt_qry_get_offset(struct pt_query_decoder *decoder, uint64_t *offset) +{ + const uint8_t *begin, *pos; + + if (!decoder || !offset) + return -pte_invalid; + + begin = decoder->config.begin; + pos = decoder->pos; + + if (!pos) + return -pte_nosync; + + *offset = pos - begin; + return 0; +} + +int pt_qry_get_sync_offset(struct pt_query_decoder *decoder, uint64_t *offset) +{ + const uint8_t *begin, *sync; + + if (!decoder || !offset) + return -pte_invalid; + + begin = decoder->config.begin; + sync = decoder->sync; + + if (!sync) + return -pte_nosync; + + *offset = sync - begin; + return 0; +} + +const struct pt_config * +pt_qry_get_config(const struct pt_query_decoder *decoder) +{ + if (!decoder) + return NULL; + + return &decoder->config; +} + +static int pt_qry_cache_tnt(struct pt_query_decoder *decoder) +{ + int errcode; + + for (;;) { + const struct pt_decoder_function *dfun; + + dfun = decoder->next; + if (!dfun) + return pt_qry_provoke_fetch_error(decoder); + + if (!dfun->decode) + return -pte_internal; + + /* There's an event ahead of us. */ + if (pt_qry_will_event(decoder)) + return -pte_bad_query; + + /* Diagnose a TIP that has not been part of an event. */ + if (dfun->flags & pdff_tip) + return -pte_bad_query; + + /* Clear the decoder's current event so we know when we + * accidentally skipped an event. + */ + decoder->event = NULL; + + /* Apply the decoder function. */ + errcode = dfun->decode(decoder); + if (errcode) + return errcode; + + /* If we skipped an event, we're in trouble. */ + if (decoder->event) + return -pte_event_ignored; + + /* We're done when we decoded a TNT packet. */ + if (dfun->flags & pdff_tnt) + break; + + /* Read ahead until the next query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if (errcode) + return errcode; + } + + /* Read ahead until the next query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if ((errcode < 0) && (errcode != -pte_eos)) + return errcode; + + return 0; +} + +int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken) +{ + int errcode, query; + + if (!decoder || !taken) + return -pte_invalid; + + /* We cache the latest tnt packet in the decoder. Let's re-fill the + * cache in case it is empty. + */ + if (pt_tnt_cache_is_empty(&decoder->tnt)) { + errcode = pt_qry_cache_tnt(decoder); + if (errcode < 0) + return errcode; + } + + query = pt_tnt_cache_query(&decoder->tnt); + if (query < 0) + return query; + + *taken = query; + + return pt_qry_status_flags(decoder); +} + +int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr) +{ + int errcode, flags; + + if (!decoder || !addr) + return -pte_invalid; + + flags = 0; + for (;;) { + const struct pt_decoder_function *dfun; + + dfun = decoder->next; + if (!dfun) + return pt_qry_provoke_fetch_error(decoder); + + if (!dfun->decode) + return -pte_internal; + + /* There's an event ahead of us. */ + if (pt_qry_will_event(decoder)) + return -pte_bad_query; + + /* Clear the decoder's current event so we know when we + * accidentally skipped an event. 
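+ * + * Any event that dfun->decode() publishes below would be lost to the caller, so it is reported as -pte_event_ignored instead.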
+ */ + decoder->event = NULL; + + /* We may see a single TNT packet if the current tnt is empty. + * + * If we see a TNT while the current tnt is not empty, it means + * that our user got out of sync. Let's report no data and hope + * that our user is able to re-sync. + */ + if ((dfun->flags & pdff_tnt) && + !pt_tnt_cache_is_empty(&decoder->tnt)) + return -pte_bad_query; + + /* Apply the decoder function. */ + errcode = dfun->decode(decoder); + if (errcode) + return errcode; + + /* If we skipped an event, we're in trouble. */ + if (decoder->event) + return -pte_event_ignored; + + /* We're done when we found a TIP packet that isn't part of an + * event. + */ + if (dfun->flags & pdff_tip) { + uint64_t ip; + + /* We already decoded it, so the branch destination + * is stored in the decoder's last ip. + */ + errcode = pt_last_ip_query(&ip, &decoder->ip); + if (errcode < 0) + flags |= pts_ip_suppressed; + else + *addr = ip; + + break; + } + + /* Read ahead until the next query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if (errcode) + return errcode; + } + + /* Read ahead until the next query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if ((errcode < 0) && (errcode != -pte_eos)) + return errcode; + + flags |= pt_qry_status_flags(decoder); + + return flags; +} + +int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event, + size_t size) +{ + int errcode, flags; + + if (!decoder || !event) + return -pte_invalid; + + if (size < offsetof(struct pt_event, variant)) + return -pte_invalid; + + /* We do not allow querying for events while there are still TNT + * bits to consume. + */ + if (!pt_tnt_cache_is_empty(&decoder->tnt)) + return -pte_bad_query; + + /* Do not provide more than we actually have. */ + if (sizeof(*event) < size) + size = sizeof(*event); + + flags = 0; + for (;;) { + const struct pt_decoder_function *dfun; + + dfun = decoder->next; + if (!dfun) + return pt_qry_provoke_fetch_error(decoder); + + if (!dfun->decode) + return -pte_internal; + + /* We must not see a TIP or TNT packet unless it belongs + * to an event. + * + * If we see one, it means that our user got out of sync. + * Let's report no data and hope that our user is able + * to re-sync. + */ + if ((dfun->flags & (pdff_tip | pdff_tnt)) && + !pt_qry_will_event(decoder)) + return -pte_bad_query; + + /* Clear the decoder's current event so we know when decoding + * produces a new event. + */ + decoder->event = NULL; + + /* Apply any other decoder function. */ + errcode = dfun->decode(decoder); + if (errcode) + return errcode; + + /* Check if there has been an event. + * + * Some packets may result in events in some but not in all + * configurations. + */ + if (decoder->event) { + (void) memcpy(event, decoder->event, size); + break; + } + + /* Read ahead until the next query-relevant packet. */ + errcode = pt_qry_read_ahead(decoder); + if (errcode) + return errcode; + } + + /* Read ahead until the next query-relevant packet. 
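+ * + * Running out of trace at this point is not an error; -pte_eos is filtered below and is reported via the pts_eos status flag instead.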
*/ + errcode = pt_qry_read_ahead(decoder); + if ((errcode < 0) && (errcode != -pte_eos)) + return errcode; + + flags |= pt_qry_status_flags(decoder); + + return flags; +} + +int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time, + uint32_t *lost_mtc, uint32_t *lost_cyc) +{ + if (!decoder || !time) + return -pte_invalid; + + return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->time); +} + +int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr) +{ + if (!decoder || !cbr) + return -pte_invalid; + + return pt_time_query_cbr(cbr, &decoder->time); +} + +static void pt_qry_add_event_time(struct pt_event *event, + const struct pt_query_decoder *decoder) +{ + int errcode; + + if (!event || !decoder) + return; + + errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc, + &event->lost_cyc, &decoder->time); + if (errcode >= 0) + event->has_tsc = 1; +} + +int pt_qry_decode_unknown(struct pt_query_decoder *decoder) +{ + struct pt_packet packet; + int size; + + size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_pad(struct pt_query_decoder *decoder) +{ + decoder->pos += ptps_pad; + + return 0; +} + +static int pt_qry_read_psb_header(struct pt_query_decoder *decoder) +{ + pt_last_ip_init(&decoder->ip); + + for (;;) { + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_df_fetch(&decoder->next, decoder->pos, + &decoder->config); + if (errcode) + return errcode; + + dfun = decoder->next; + if (!dfun) + return -pte_internal; + + /* We're done once we reach a psbend packet. */ + if (dfun->flags & pdff_psbend) + return 0; + + if (!dfun->header) + return -pte_bad_context; + + errcode = dfun->header(decoder); + if (errcode) + return errcode; + } +} + +int pt_qry_decode_psb(struct pt_query_decoder *decoder) +{ + const uint8_t *pos; + int size, errcode; + + pos = decoder->pos; + + size = pt_pkt_read_psb(pos, &decoder->config); + if (size < 0) + return size; + + decoder->pos += size; + + errcode = pt_qry_read_psb_header(decoder); + if (errcode < 0) { + /* Move back to the PSB so we have a chance to recover and + * continue decoding. + */ + decoder->pos = pos; + + /* Clear any PSB+ events that have already been queued. */ + (void) pt_evq_clear(&decoder->evq, evb_psbend); + + /* Reset the decoder's decode function. */ + decoder->next = &pt_decode_psb; + + return errcode; + } + + /* The next packet following the PSB header will be of type PSBEND. + * + * Decoding this packet will publish the PSB events that have been + * accumulated while reading the PSB header. + */ + return 0; +} + +static void pt_qry_add_event_ip(struct pt_event *event, uint64_t *ip, + const struct pt_query_decoder *decoder) +{ + int errcode; + + errcode = pt_last_ip_query(ip, &decoder->ip); + if (errcode < 0) + event->ip_suppressed = 1; +} + +/* Decode a generic IP packet. + * + * Returns the number of bytes read, on success. + * Returns -pte_eos if the ip does not fit into the buffer. + * Returns -pte_bad_packet if the ip compression is not known. + */ +static int pt_qry_decode_ip(struct pt_query_decoder *decoder) +{ + struct pt_packet_ip packet; + int errcode, size; + + size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config); + if (errcode < 0) + return errcode; + + /* We do not update the decoder's position, yet.
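+ * + * The callers advance decoder->pos via pt_qry_consume_tip() and its siblings once all events binding to this packet have been processed.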
*/ + + return size; +} + +static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size) +{ + decoder->pos += size; + return 0; +} + +int pt_qry_decode_tip(struct pt_query_decoder *decoder) +{ + struct pt_event *ev; + int size; + + size = pt_qry_decode_ip(decoder); + if (size < 0) + return size; + + /* Process any pending events binding to TIP. */ + ev = pt_evq_dequeue(&decoder->evq, evb_tip); + if (ev) { + switch (ev->type) { + default: + return -pte_internal; + + case ptev_async_branch: + pt_qry_add_event_ip(ev, &ev->variant.async_branch.to, + decoder); + + decoder->consume_packet = 1; + + break; + + case ptev_async_paging: + pt_qry_add_event_ip(ev, &ev->variant.async_paging.ip, + decoder); + break; + + case ptev_async_vmcs: + pt_qry_add_event_ip(ev, &ev->variant.async_vmcs.ip, + decoder); + break; + + case ptev_exec_mode: + pt_qry_add_event_ip(ev, &ev->variant.exec_mode.ip, + decoder); + break; + } + + /* Publish the event. */ + decoder->event = ev; + + /* Process further pending events. */ + if (pt_evq_pending(&decoder->evq, evb_tip)) + return 0; + + /* No further events. + * + * If none of the events consumed the packet, we're done. + */ + if (!decoder->consume_packet) + return 0; + + /* We're done with this packet. Clear the flag we set previously + * and consume it. + */ + decoder->consume_packet = 0; + } + + return pt_qry_consume_tip(decoder, size); +} + +int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder) +{ + struct pt_packet_tnt packet; + int size, errcode; + + size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet, + &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder) +{ + struct pt_packet_tnt packet; + int size, errcode; + + size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet, + &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size) +{ + decoder->pos += size; + return 0; +} + +int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder) +{ + struct pt_event *ev; + int size; + + size = pt_qry_decode_ip(decoder); + if (size < 0) + return size; + + /* We send the enable event first. This is more convenient for our users + * and does not require them to either store or blindly apply other + * events that might be pending. + * + * We use the consume packet decoder flag to indicate this. + */ + if (!decoder->consume_packet) { + uint64_t ip; + int errcode; + + /* We can't afford a suppressed IP, here. */ + errcode = pt_last_ip_query(&ip, &decoder->ip); + if (errcode < 0) + return -pte_bad_packet; + + /* This packet signals a standalone enabled event. */ + ev = pt_evq_standalone(&decoder->evq); + if (!ev) + return -pte_internal; + ev->type = ptev_enabled; + ev->variant.enabled.ip = ip; + + pt_qry_add_event_time(ev, decoder); + + /* Discard any cached TNT bits. + * + * They should have been consumed at the corresponding disable + * event. If they have not, for whatever reason, discard them + * now so our user does not get out of sync. + */ + pt_tnt_cache_init(&decoder->tnt); + + /* Process pending events next. */ + decoder->consume_packet = 1; + decoder->enabled = 1; + } else { + /* Process any pending events binding to TIP. 
*/ + ev = pt_evq_dequeue(&decoder->evq, evb_tip); + if (ev) { + switch (ev->type) { + default: + return -pte_internal; + + case ptev_exec_mode: + pt_qry_add_event_ip(ev, + &ev->variant.exec_mode.ip, + decoder); + break; + } + } + } + + /* We must have an event. Either the initial enable event or one of the + * queued events. + */ + if (!ev) + return -pte_internal; + + /* Publish the event. */ + decoder->event = ev; + + /* Process further pending events. */ + if (pt_evq_pending(&decoder->evq, evb_tip)) + return 0; + + /* We must consume the packet. */ + if (!decoder->consume_packet) + return -pte_internal; + + decoder->consume_packet = 0; + + return pt_qry_consume_tip_pge(decoder, size); +} + +static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size) +{ + decoder->enabled = 0; + decoder->pos += size; + return 0; +} + +int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder) +{ + struct pt_event *ev; + uint64_t at; + int size; + + size = pt_qry_decode_ip(decoder); + if (size < 0) + return size; + + /* Process any pending events binding to TIP. */ + ev = pt_evq_dequeue(&decoder->evq, evb_tip); + if (ev) { + /* The only event we expect is an async branch. */ + if (ev->type != ptev_async_branch) + return -pte_internal; + + /* We do not expect any further events. */ + if (pt_evq_pending(&decoder->evq, evb_tip)) + return -pte_internal; + + /* Turn the async branch into an async disable. */ + at = ev->variant.async_branch.from; + + ev->type = ptev_async_disabled; + ev->variant.async_disabled.at = at; + pt_qry_add_event_ip(ev, &ev->variant.async_disabled.ip, + decoder); + } else { + /* This packet signals a standalone disabled event. */ + ev = pt_evq_standalone(&decoder->evq); + if (!ev) + return -pte_internal; + ev->type = ptev_disabled; + pt_qry_add_event_ip(ev, &ev->variant.disabled.ip, decoder); + pt_qry_add_event_time(ev, decoder); + } + + /* Publish the event. */ + decoder->event = ev; + + return pt_qry_consume_tip_pgd(decoder, size); +} + +static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size) +{ + decoder->pos += size; + return 0; +} + +static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder) +{ + for (;;) { + struct pt_packet packet; + int errcode; + + errcode = pt_pkt_next(decoder, &packet, sizeof(packet)); + if (errcode < 0) { + /* Running out of packets is not an error. */ + if (errcode == -pte_eos) + errcode = 0; + + return errcode; + } + + switch (packet.type) { + default: + /* All other packets cancel our search. + * + * We do not enumerate those packets since we also + * want to include new packets. + */ + return 0; + + case ppt_tip_pge: + /* We found it - the erratum applies. */ + return 1; + + case ppt_pad: + case ppt_tsc: + case ppt_cbr: + case ppt_psbend: + case ppt_pip: + case ppt_mode: + case ppt_vmcs: + case ppt_tma: + case ppt_mtc: + case ppt_cyc: + case ppt_mnt: + /* Intentionally skip a few packets. 
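+ * + * None of these decides the search one way or the other, so we keep scanning.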
*/ + continue; + } + } +} + +static int check_erratum_bdm70(const uint8_t *pos, + const struct pt_config *config) +{ + struct pt_packet_decoder decoder; + int errcode; + + if (!pos || !config) + return -pte_internal; + + errcode = pt_pkt_decoder_init(&decoder, config); + if (errcode < 0) + return errcode; + + errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin)); + if (errcode >= 0) + errcode = scan_for_erratum_bdm70(&decoder); + + pt_pkt_decoder_fini(&decoder); + return errcode; +} + +int pt_qry_header_fup(struct pt_query_decoder *decoder) +{ + struct pt_packet_ip packet; + int errcode, size; + + size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + if (decoder->config.errata.bdm70 && !decoder->enabled) { + errcode = check_erratum_bdm70(decoder->pos + size, + &decoder->config); + if (errcode < 0) + return errcode; + + if (errcode) + return pt_qry_consume_fup(decoder, size); + } + + errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config); + if (errcode < 0) + return errcode; + + /* Tracing is enabled if we have an IP in the header. */ + if (packet.ipc != pt_ipc_suppressed) + decoder->enabled = 1; + + return pt_qry_consume_fup(decoder, size); +} + +int pt_qry_decode_fup(struct pt_query_decoder *decoder) +{ + struct pt_event *ev; + int size; + + size = pt_qry_decode_ip(decoder); + if (size < 0) + return size; + + /* Process any pending events binding to FUP. */ + ev = pt_evq_dequeue(&decoder->evq, evb_fup); + if (ev) { + switch (ev->type) { + default: + return -pte_internal; + + case ptev_overflow: { + uint64_t ip; + int errcode; + + /* We can't afford a suppressed IP, here. */ + errcode = pt_last_ip_query(&ip, &decoder->ip); + if (errcode < 0) + return -pte_bad_packet; + + ev->variant.overflow.ip = ip; + + decoder->consume_packet = 1; + } + break; + + case ptev_tsx: + pt_qry_add_event_ip(ev, &ev->variant.tsx.ip, decoder); + + if (!(ev->variant.tsx.aborted)) + decoder->consume_packet = 1; + + break; + } + + /* Publish the event. */ + decoder->event = ev; + + /* Process further pending events. */ + if (pt_evq_pending(&decoder->evq, evb_fup)) + return 0; + + /* No further events. + * + * If none of the events consumed the packet, we're done. + */ + if (!decoder->consume_packet) + return 0; + + /* We're done with this packet. Clear the flag we set previously + * and consume it. + */ + decoder->consume_packet = 0; + } else { + /* FUP indicates an async branch event; it binds to TIP. + * + * We do need an IP in this case. + */ + uint64_t ip; + int errcode; + + errcode = pt_last_ip_query(&ip, &decoder->ip); + if (errcode < 0) + return -pte_bad_packet; + + ev = pt_evq_enqueue(&decoder->evq, evb_tip); + if (!ev) + return -pte_nomem; + + ev->type = ptev_async_branch; + ev->variant.async_branch.from = ip; + + pt_qry_add_event_time(ev, decoder); + } + + return pt_qry_consume_fup(decoder, size); +} + +int pt_qry_decode_pip(struct pt_query_decoder *decoder) +{ + struct pt_packet_pip packet; + struct pt_event *event; + int size; + + size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + /* Paging events are either standalone or bind to the same TIP packet + * as an in-flight async branch event. 
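+ * + * If no async branch is in flight, a standalone ptev_paging event is published right away; otherwise a ptev_async_paging event is queued to bind to the upcoming TIP.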
+ */ + event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch); + if (!event) { + event = pt_evq_standalone(&decoder->evq); + if (!event) + return -pte_internal; + event->type = ptev_paging; + event->variant.paging.cr3 = packet.cr3; + event->variant.paging.non_root = packet.nr; + + pt_qry_add_event_time(event, decoder); + + decoder->event = event; + } else { + event = pt_evq_enqueue(&decoder->evq, evb_tip); + if (!event) + return -pte_nomem; + + event->type = ptev_async_paging; + event->variant.async_paging.cr3 = packet.cr3; + event->variant.async_paging.non_root = packet.nr; + + pt_qry_add_event_time(event, decoder); + } + + decoder->pos += size; + return 0; +} + +int pt_qry_header_pip(struct pt_query_decoder *decoder) +{ + struct pt_packet_pip packet; + struct pt_event *event; + int size; + + size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + /* Paging events are reported at the end of the PSB. */ + event = pt_evq_enqueue(&decoder->evq, evb_psbend); + if (!event) + return -pte_nomem; + + event->type = ptev_async_paging; + event->variant.async_paging.cr3 = packet.cr3; + event->variant.async_paging.non_root = packet.nr; + + decoder->pos += size; + return 0; +} + +static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder) +{ + struct pt_event *ev; + + ev = pt_evq_dequeue(&decoder->evq, evb_psbend); + if (!ev) + return 0; + + switch (ev->type) { + default: + return -pte_internal; + + case ptev_async_paging: + pt_qry_add_event_ip(ev, &ev->variant.async_paging.ip, decoder); + break; + + case ptev_exec_mode: + pt_qry_add_event_ip(ev, &ev->variant.exec_mode.ip, decoder); + break; + + case ptev_tsx: + pt_qry_add_event_ip(ev, &ev->variant.tsx.ip, decoder); + break; + + case ptev_async_vmcs: + pt_qry_add_event_ip(ev, &ev->variant.async_vmcs.ip, decoder); + break; + } + + pt_qry_add_event_time(ev, decoder); + + /* PSB+ events are status updates. */ + ev->status_update = 1; + + /* Publish the event. */ + decoder->event = ev; + + /* Signal a pending event. */ + return 1; +} + +/* Processes packets as long as the packet's event flag matches @pdff. + * + * Returns zero on success; a negative error code otherwise. + */ +static int pt_qry_read_ahead_while(struct pt_query_decoder *decoder, + uint32_t pdff) +{ + for (;;) { + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_df_fetch(&decoder->next, decoder->pos, + &decoder->config); + if (errcode < 0) + return errcode; + + dfun = decoder->next; + if (!dfun) + return -pte_internal; + + if (!dfun->decode) + return -pte_bad_context; + + if (!(dfun->flags & pdff)) + return 0; + + errcode = dfun->decode(decoder); + if (errcode < 0) + return errcode; + } +} + +/* Recover from SKD010. + * + * Creates and publishes an overflow event at @packet's IP payload. + * + * Further updates @decoder as follows: + * + * - set time tracking to @time and @tcal + * - set the position to @offset + * - set ip to @packet's IP payload + * - set tracing to be enabled + * + * Returns 1 on success, a negative error code otherwise. + */ +static int skd010_recover(struct pt_query_decoder *decoder, + const struct pt_packet_ip *packet, + const struct pt_time_cal *tcal, + const struct pt_time *time, uint64_t offset) +{ + struct pt_last_ip ip; + struct pt_event *ev; + int errcode; + + if (!decoder || !packet || !tcal || !time) + return -pte_internal; + + /* We use the decoder's IP. It should be newly initialized. */ + ip = decoder->ip; + + /* Extract the IP payload from the packet. 
*/ + errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config); + if (errcode < 0) + return errcode; + + /* Synthesize the overflow event. */ + ev = pt_evq_standalone(&decoder->evq); + if (!ev) + return -pte_internal; + + ev->type = ptev_overflow; + + /* We do need a full IP. */ + errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip); + if (errcode < 0) + return -pte_bad_context; + + /* We continue decoding at the given offset. */ + decoder->pos = decoder->config.begin + offset; + + /* Tracing is enabled. */ + decoder->enabled = 1; + decoder->ip = ip; + + decoder->time = *time; + decoder->tcal = *tcal; + + /* After updating the decoder's time, we can fill in the event + * timestamp. + */ + pt_qry_add_event_time(ev, decoder); + + /* Publish the event. */ + decoder->event = ev; + return 1; +} + +/* Scan ahead for a packet at which to resume after an overflow. + * + * This function is called after an OVF without a corresponding FUP. This + * normally means that the overflow resolved while tracing was disabled. + * + * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped. + * The overflow thus resolved while tracing was enabled (or tracing was enabled + * after the overflow resolved). Search for an indication whether tracing is + * enabled or disabled by scanning upcoming packets. + * + * If we can confirm that tracing is disabled, the erratum does not apply and we + * can continue normally. + * + * If we can confirm that tracing is enabled, the erratum applies and we try to + * recover by synchronizing at a later packet and a different IP. If we can't + * recover, pretend the erratum didn't apply so we run into the error later. + * Since this assumes that tracing is disabled, no harm should be done, i.e. no + * bad trace should be generated. + * + * Returns a positive value if the overflow is handled. + * Returns zero if the overflow is not yet handled. + * Returns a negative error code otherwise. + */ +static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt, + struct pt_query_decoder *decoder) +{ + struct pt_time_cal tcal; + struct pt_time time; + struct { + struct pt_time_cal tcal; + struct pt_time time; + uint64_t offset; + } mode_tsx; + int errcode; + + /* Keep track of time as we skip packets. */ + time = decoder->time; + tcal = decoder->tcal; + + /* Keep track of a potential recovery point at MODE.TSX. */ + memset(&mode_tsx, 0, sizeof(mode_tsx)); + + for (;;) { + struct pt_packet packet; + uint64_t offset; + + errcode = pt_pkt_get_offset(pkt, &offset); + if (errcode < 0) + return errcode; + + errcode = pt_pkt_next(pkt, &packet, sizeof(packet)); + if (errcode < 0) { + /* Let's assume the trace is correct if we run out + * of packets. + */ + if (errcode == -pte_eos) + errcode = 0; + + return errcode; + } + + switch (packet.type) { + case ppt_tip_pge: + /* Everything is fine. There is nothing to do. */ + return 0; + + case ppt_tip_pgd: + /* This is a clear indication that the erratum + * apllies. + * + * We synchronize after the disable. + */ + + decoder->time = time; + decoder->tcal = tcal; + decoder->pos = decoder->config.begin + offset + + packet.size; + + /* Even though the erratum applies, tracing is disabled + * at the time we're able to resync. We can use the + * normal code path. + */ + return 0; + + case ppt_tnt_8: + case ppt_tnt_64: + /* This is a clear indication that the erratum + * apllies. 
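+ * TNT packets are only generated while tracing is enabled.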
+ * + * Yet, we can't recover from it as we wouldn't know how + * many TNT bits will have been used when we eventually + * find an IP packet at which to resume tracing. + */ + return 0; + + case ppt_pip: + case ppt_vmcs: + /* We could track those changes and synthesize extra + * events after the overflow event when recovering from + * the erratum. This requires infrastructure that we + * don't currently have, though, so we're not going to + * do it. + * + * Instead, we ignore those changes. We already don't + * know how many other changes were lost in the + * overflow. + */ + break; + + case ppt_mode: + switch (packet.payload.mode.leaf) { + case pt_mol_exec: + /* A MODE.EXEC packet binds to TIP, i.e. + * + * TIP.PGE: everything is fine + * TIP: the erratum applies + * + * In the TIP.PGE case, we may just follow the + * normal code flow. + * + * In the TIP case, we'd be able to re-sync at + * the TIP IP but have to skip packets up to and + * including the TIP. + * + * We'd need to synthesize the MODE.EXEC event + * after the overflow event when recovering at + * the TIP. We lack the infrastructure for this + * - it's getting too complicated. + * + * Instead, we ignore the execution mode change; + * we already don't know how many more such + * changes were lost in the overflow. + */ + break; + + case pt_mol_tsx: + /* A MODE.TSX packet may be standalone or bind + * to FUP. + * + * If this is the second MODE.TSX, we're sure + * that tracing is disabled and everything is + * fine. + */ + if (mode_tsx.offset) + return 0; + + /* If we find the FUP this packet binds to, we + * may recover at the FUP IP and restart + * processing packets from here. Remember the + * current state. + */ + mode_tsx.offset = offset; + mode_tsx.time = time; + mode_tsx.tcal = tcal; + + break; + } + + break; + + case ppt_fup: + /* This is a pretty good indication that tracing + * is indeed enabled and the erratum applies. + */ + + /* If we got a MODE.TSX packet before, we synchronize at + * the FUP IP but continue decoding packets starting + * from the MODE.TSX. + */ + if (mode_tsx.offset) + return skd010_recover(decoder, + &packet.payload.ip, + &mode_tsx.tcal, + &mode_tsx.time, + mode_tsx.offset); + + /* Without a preceding MODE.TSX, this FUP is the start + * of an async branch or disable. We synchronize at the + * FUP IP and continue decoding packets from here. + */ + return skd010_recover(decoder, &packet.payload.ip, + &tcal, &time, offset); + + case ppt_tip: + /* We syhchronize at the TIP IP and continue decoding + * packets after the TIP packet. + */ + return skd010_recover(decoder, &packet.payload.ip, + &tcal, &time, + offset + packet.size); + + case ppt_psb: + /* We reached a synchronization point. Tracing is + * enabled if and only if the PSB+ contains a FUP. + */ + errcode = pt_qry_find_header_fup(&packet, pkt); + if (errcode < 0) { + /* If we ran out of packets, we can't tell. + * Let's assume the trace is correct. + */ + if (errcode == -pte_eos) + errcode = 0; + + return errcode; + } + + /* If there is no FUP, tracing is disabled and + * everything is fine. + */ + if (!errcode) + return 0; + + /* We should have a FUP. */ + if (packet.type != ppt_fup) + return -pte_internal; + + /* Otherwise, we may synchronize at the FUP IP and + * continue decoding packets at the PSB. + */ + return skd010_recover(decoder, &packet.payload.ip, + &tcal, &time, offset); + + case ppt_psbend: + /* We shouldn't see this. 
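+ * We would have stopped at the preceding PSB, so a stray PSBEND indicates an inconsistent trace.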
*/ + return -pte_bad_context; + + case ppt_ovf: + case ppt_stop: + /* It doesn't matter if it had been enabled or disabled + * before. We may resume normally. + */ + return 0; + + case ppt_unknown: + case ppt_invalid: + /* We can't skip this packet. */ + return 0; + + case ppt_pad: + case ppt_mnt: + /* Ignore this packet. */ + break; + + case ppt_tsc: + /* Keep track of time. */ + errcode = pt_qry_apply_tsc(&time, &tcal, + &packet.payload.tsc, + &decoder->config); + if (errcode < 0) + return errcode; + + break; + + case ppt_cbr: + /* Keep track of time. */ + errcode = pt_qry_apply_cbr(&time, &tcal, + &packet.payload.cbr, + &decoder->config); + if (errcode < 0) + return errcode; + + break; + + case ppt_tma: + /* Keep track of time. */ + errcode = pt_qry_apply_tma(&time, &tcal, + &packet.payload.tma, + &decoder->config); + if (errcode < 0) + return errcode; + + break; + + case ppt_mtc: + /* Keep track of time. */ + errcode = pt_qry_apply_mtc(&time, &tcal, + &packet.payload.mtc, + &decoder->config); + if (errcode < 0) + return errcode; + + break; + + case ppt_cyc: + /* Keep track of time. */ + errcode = pt_qry_apply_cyc(&time, &tcal, + &packet.payload.cyc, + &decoder->config); + if (errcode < 0) + return errcode; + + break; + } + } +} + +static int pt_qry_handle_skd010(struct pt_query_decoder *decoder) +{ + struct pt_packet_decoder pkt; + uint64_t offset; + int errcode; + + if (!decoder) + return -pte_internal; + + errcode = pt_qry_get_offset(decoder, &offset); + if (errcode < 0) + return errcode; + + errcode = pt_pkt_decoder_init(&pkt, &decoder->config); + if (errcode < 0) + return errcode; + + errcode = pt_pkt_sync_set(&pkt, offset); + if (errcode >= 0) + errcode = skd010_scan_for_ovf_resume(&pkt, decoder); + + pt_pkt_decoder_fini(&pkt); + return errcode; +} + +int pt_qry_decode_ovf(struct pt_query_decoder *decoder) +{ + const struct pt_decoder_function *dfun; + struct pt_event *ev; + struct pt_time time; + int status, errcode; + + status = pt_qry_process_pending_psb_events(decoder); + if (status < 0) + return status; + + /* If we have any pending psbend events, we're done for now. */ + if (status) + return 0; + + /* Reset the decoder state but preserve timing. */ + time = decoder->time; + pt_qry_reset(decoder); + decoder->time = time; + + /* We must consume the OVF before we search for the binding packet. */ + decoder->pos += ptps_ovf; + + /* Overflow binds to either FUP or TIP.PGE. + * + * If the overflow can be resolved while PacketEn=1 it binds to FUP. We + * can see timing packets between OVF anf FUP but that's it. + * + * Otherwise, PacketEn will be zero when the overflow resolves and OVF + * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that + * do not depend on PacketEn. + * + * We don't need to decode everything until TIP.PGE, however. As soon + * as we see a non-timing non-FUP packet, we know that tracing has been + * disabled before the overflow resolves. + */ + errcode = pt_qry_read_ahead_while(decoder, pdff_timing | pdff_pad); + if (errcode < 0) { + if (errcode != -pte_eos) + return errcode; + + dfun = NULL; + } else { + dfun = decoder->next; + if (!dfun) + return -pte_internal; + } + + if (dfun && (dfun->flags & pdff_fup)) { + ev = pt_evq_enqueue(&decoder->evq, evb_fup); + if (!ev) + return -pte_internal; + + ev->type = ptev_overflow; + + /* We set tracing to disabled in pt_qry_reset(); fix it. */ + decoder->enabled = 1; + } else { + /* Check for erratum SKD010. + * + * The FUP may have been dropped. 
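+ * The overflow would then have resolved while tracing was enabled.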
If we can figure out that + * tracing is enabled and hence the FUP is missing, we resume + * at a later packet and a different IP. + */ + if (decoder->config.errata.skd010) { + errcode = pt_qry_handle_skd010(decoder); + if (errcode < 0) + return errcode; + + if (errcode) + return 0; + } + + ev = pt_evq_standalone(&decoder->evq); + if (!ev) + return -pte_internal; + + ev->type = ptev_overflow; + + /* We suppress the IP to indicate that tracing has been + * disabled before the overflow resolved. There can be + * several events before tracing is enabled again. + */ + ev->ip_suppressed = 1; + + /* Publish the event. */ + decoder->event = ev; + } + + pt_qry_add_event_time(ev, decoder); + + return 0; +} + +static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder, + const struct pt_packet_mode_exec *packet) +{ + struct pt_event *event; + + /* MODE.EXEC binds to TIP. */ + event = pt_evq_enqueue(&decoder->evq, evb_tip); + if (!event) + return -pte_nomem; + + event->type = ptev_exec_mode; + event->variant.exec_mode.mode = pt_get_exec_mode(packet); + + pt_qry_add_event_time(event, decoder); + + return 0; +} + +static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder, + const struct pt_packet_mode_tsx *packet) +{ + struct pt_event *event; + + /* MODE.TSX is standalone if tracing is disabled. */ + if (!decoder->enabled) { + event = pt_evq_standalone(&decoder->evq); + if (!event) + return -pte_internal; + + /* We don't have an IP in this case. */ + event->variant.tsx.ip = 0; + event->ip_suppressed = 1; + + /* Publish the event. */ + decoder->event = event; + } else { + /* MODE.TSX binds to FUP. */ + event = pt_evq_enqueue(&decoder->evq, evb_fup); + if (!event) + return -pte_nomem; + } + + event->type = ptev_tsx; + event->variant.tsx.speculative = packet->intx; + event->variant.tsx.aborted = packet->abrt; + + pt_qry_add_event_time(event, decoder); + + return 0; +} + +int pt_qry_decode_mode(struct pt_query_decoder *decoder) +{ + struct pt_packet_mode packet; + int size, errcode; + + size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = 0; + switch (packet.leaf) { + case pt_mol_exec: + errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec); + break; + + case pt_mol_tsx: + errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx); + break; + } + + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_header_mode(struct pt_query_decoder *decoder) +{ + struct pt_packet_mode packet; + struct pt_event *event; + int size; + + size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + /* Inside the header, events are reported at the end. */ + event = pt_evq_enqueue(&decoder->evq, evb_psbend); + if (!event) + return -pte_nomem; + + switch (packet.leaf) { + case pt_mol_exec: + event->type = ptev_exec_mode; + event->variant.exec_mode.mode = + pt_get_exec_mode(&packet.bits.exec); + break; + + case pt_mol_tsx: + event->type = ptev_tsx; + event->variant.tsx.speculative = packet.bits.tsx.intx; + event->variant.tsx.aborted = packet.bits.tsx.abrt; + break; + } + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_psbend(struct pt_query_decoder *decoder) +{ + int status; + + status = pt_qry_process_pending_psb_events(decoder); + if (status < 0) + return status; + + /* If we had any psb events, we're done for now. */ + if (status) + return 0; + + /* Skip the psbend extended opcode that we fetched before if no more + * psbend events are pending. 
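+ * As long as events are pending, we do not advance and will decode this PSBEND again on the next query.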
+ */ + decoder->pos += ptps_psbend; + return 0; +} + +int pt_qry_decode_tsc(struct pt_query_decoder *decoder) +{ + struct pt_packet_tsc packet; + int size, errcode; + + size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_header_tsc(struct pt_query_decoder *decoder) +{ + struct pt_packet_tsc packet; + int size, errcode; + + size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_cbr(struct pt_query_decoder *decoder) +{ + struct pt_packet_cbr packet; + int size, errcode; + + size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_header_cbr(struct pt_query_decoder *decoder) +{ + struct pt_packet_cbr packet; + int size, errcode; + + size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_tma(struct pt_query_decoder *decoder) +{ + struct pt_packet_tma packet; + int size, errcode; + + size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_mtc(struct pt_query_decoder *decoder) +{ + struct pt_packet_mtc packet; + int size, errcode; + + size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal, + &packet, &decoder->config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +static int check_erratum_skd007(struct pt_query_decoder *decoder, + const struct pt_packet_cyc *packet, int size) +{ + const uint8_t *pos; + uint16_t payload; + + if (!decoder || !packet || size < 0) + return -pte_internal; + + /* It must be a 2-byte CYC. */ + if (size != 2) + return 0; + + payload = (uint16_t) packet->value; + + /* The 2nd byte of the CYC payload must look like an ext opcode. */ + if ((payload & ~0x1f) != 0x20) + return 0; + + /* Skip this CYC packet. */ + pos = decoder->pos + size; + if (decoder->config.end <= pos) + return 0; + + /* See if we got a second CYC that looks like an OVF ext opcode. */ + if (*pos != pt_ext_ovf) + return 0; + + /* We shouldn't get back-to-back CYCs unless they are sent when the + * counter wraps around. In this case, we'd expect a full payload. + * + * Since we got two non-full CYC packets, we assume the erratum hit. 
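+ * The caller will drop the partial CYC and decode the overlapping OVF instead.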
+ */ + + return 1; +} + +int pt_qry_decode_cyc(struct pt_query_decoder *decoder) +{ + struct pt_packet_cyc packet; + struct pt_config *config; + int size, errcode; + + config = &decoder->config; + + size = pt_pkt_read_cyc(&packet, decoder->pos, config); + if (size < 0) + return size; + + if (config->errata.skd007) { + errcode = check_erratum_skd007(decoder, &packet, size); + if (errcode < 0) + return errcode; + + /* If the erratum hits, we ignore the partial CYC and instead + * process the OVF following/overlapping it. + */ + if (errcode) { + /* We skip the first byte of the CYC, which brings us + * to the beginning of the OVF packet. + */ + decoder->pos += 1; + return 0; + } + } + + errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal, + &packet, config); + if (errcode < 0) + return errcode; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_stop(struct pt_query_decoder *decoder) +{ + struct pt_event *event; + + /* Stop events are reported immediately. */ + event = pt_evq_standalone(&decoder->evq); + if (!event) + return -pte_internal; + + event->type = ptev_stop; + + pt_qry_add_event_time(event, decoder); + + decoder->event = event; + decoder->pos += ptps_stop; + return 0; +} + +int pt_qry_header_vmcs(struct pt_query_decoder *decoder) +{ + struct pt_packet_vmcs packet; + struct pt_event *event; + int size; + + size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + event = pt_evq_enqueue(&decoder->evq, evb_psbend); + if (!event) + return -pte_nomem; + + event->type = ptev_async_vmcs; + event->variant.async_vmcs.base = packet.base; + + decoder->pos += size; + return 0; +} + +int pt_qry_decode_vmcs(struct pt_query_decoder *decoder) +{ + struct pt_packet_vmcs packet; + struct pt_event *event; + int size; + + size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config); + if (size < 0) + return size; + + /* VMCS events bind to the same IP as an in-flight async paging event. + * + * In that case, the VMCS event should be applied first. We reorder + * events here to simplify the life of higher layers. + */ + event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging); + if (event) { + struct pt_event *paging; + + paging = pt_evq_enqueue(&decoder->evq, evb_tip); + if (!paging) + return -pte_nomem; + + *paging = *event; + + event->type = ptev_async_vmcs; + event->variant.async_vmcs.base = packet.base; + + decoder->pos += size; + return 0; + } + + /* VMCS events bind to the same TIP packet as an in-flight async + * branch event. + */ + event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch); + if (event) { + event = pt_evq_enqueue(&decoder->evq, evb_tip); + if (!event) + return -pte_nomem; + + event->type = ptev_async_vmcs; + event->variant.async_vmcs.base = packet.base; + + decoder->pos += size; + return 0; + } + + /* VMCS events that do not bind to an in-flight async event are + * stand-alone. 
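+ * They are published immediately, like standalone paging events.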
+ */ + event = pt_evq_standalone(&decoder->evq); + if (!event) + return -pte_internal; + + event->type = ptev_vmcs; + event->variant.vmcs.base = packet.base; + + pt_qry_add_event_time(event, decoder); + + decoder->event = event; + decoder->pos += size; + return 0; +} + +int pt_qry_decode_mnt(struct pt_query_decoder *decoder) +{ + decoder->pos += ptps_mnt; + + return 0; +} diff --git a/libipt/src/pt_retstack.c b/libipt/src/pt_retstack.c new file mode 100644 index 0000000..198a171 --- /dev/null +++ b/libipt/src/pt_retstack.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_retstack.h" + +#include "intel-pt.h" + + +void pt_retstack_init(struct pt_retstack *retstack) +{ + if (!retstack) + return; + + retstack->top = 0; + retstack->bottom = 0; +} + +int pt_retstack_is_empty(const struct pt_retstack *retstack) +{ + if (!retstack) + return -pte_invalid; + + return (retstack->top == retstack->bottom ? 1 : 0); +} + +int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip) +{ + uint8_t top; + + if (!retstack) + return -pte_invalid; + + top = retstack->top; + + if (top == retstack->bottom) + return -pte_retstack_empty; + + top = (!top ? pt_retstack_size : top - 1); + + retstack->top = top; + + if (ip) + *ip = retstack->stack[top]; + + return 0; +} + +int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip) +{ + uint8_t top, bottom; + + if (!retstack) + return -pte_invalid; + + top = retstack->top; + bottom = retstack->bottom; + + retstack->stack[top] = ip; + + top = (top == pt_retstack_size ? 0 : top + 1); + + if (bottom == top) + bottom = (bottom == pt_retstack_size ? 
0 : bottom + 1); + + retstack->top = top; + retstack->bottom = bottom; + + return 0; +} diff --git a/libipt/src/pt_section.c b/libipt/src/pt_section.c new file mode 100644 index 0000000..77c377c --- /dev/null +++ b/libipt/src/pt_section.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_section.h" +#include "pt_block_cache.h" + +#include "intel-pt.h" + +#include +#include +#include + + +static char *dupstr(const char *str) +{ + char *dup; + size_t len; + + if (!str) + return NULL; + + len = strlen(str); + dup = malloc(len + 1); + if (!dup) + return NULL; + + return strcpy(dup, str); +} + +struct pt_section *pt_mk_section(const char *filename, uint64_t offset, + uint64_t size) +{ + struct pt_section *section; + uint64_t fsize; + void *status; + int errcode; + + errcode = pt_section_mk_status(&status, &fsize, filename); + if (errcode < 0) + return NULL; + + /* Fail if the requested @offset lies beyond the end of @file. */ + if (fsize <= offset) + goto out_status; + + /* Truncate @size so the entire range lies within @file. 
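+ * A too large @size is not an error; we simply map less.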
*/ + fsize -= offset; + if (fsize < size) + size = fsize; + + section = malloc(sizeof(*section)); + if (!section) + goto out_status; + + memset(section, 0, sizeof(*section)); + + section->filename = dupstr(filename); + section->status = status; + section->offset = offset; + section->size = size; + section->ucount = 1; + +#if defined(FEATURE_THREADS) + + errcode = mtx_init(§ion->lock, mtx_plain); + if (errcode != thrd_success) { + free(section->filename); + free(section); + goto out_status; + } + +#endif /* defined(FEATURE_THREADS) */ + + return section; + +out_status: + free(status); + return NULL; +} + +int pt_section_clone(struct pt_section **pclone, + const struct pt_section *section, uint64_t offset, + uint64_t size) +{ + struct pt_section *clone; + uint64_t begin, end, sbegin, send; + + if (!pclone || !section) + return -pte_internal; + + begin = offset; + end = begin + size; + + sbegin = pt_section_offset(section); + send = sbegin + pt_section_size(section); + + if (begin < sbegin || send < end) + return -pte_internal; + + clone = pt_mk_section(pt_section_filename(section), offset, size); + if (!clone) + return -pte_nomem; + + *pclone = clone; + return 0; +} + +int pt_section_lock(struct pt_section *section) +{ + if (!section) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_lock(§ion->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +int pt_section_unlock(struct pt_section *section) +{ + if (!section) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_unlock(§ion->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +static void pt_section_free(struct pt_section *section) +{ + if (!section) + return; + +#if defined(FEATURE_THREADS) + + mtx_destroy(§ion->lock); + +#endif /* defined(FEATURE_THREADS) */ + + free(section->filename); + free(section->status); + free(section); +} + +int pt_section_get(struct pt_section *section) +{ + uint16_t ucount; + int errcode; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + ucount = section->ucount + 1; + if (!ucount) { + (void) pt_section_unlock(section); + return -pte_internal; + } + + section->ucount = ucount; + + return pt_section_unlock(section); +} + +int pt_section_put(struct pt_section *section) +{ + uint16_t ucount, mcount; + int errcode; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = section->mcount; + ucount = section->ucount; + if (ucount > 1) { + section->ucount = ucount - 1; + return pt_section_unlock(section); + } + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + if (!ucount || mcount) + return -pte_internal; + + pt_section_free(section); + return 0; +} + +const char *pt_section_filename(const struct pt_section *section) +{ + if (!section) + return NULL; + + return section->filename; +} + +uint64_t pt_section_size(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->size; +} + +uint64_t pt_section_offset(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->offset; +} + +int pt_section_add_bcache(struct pt_section *section) +{ + uint32_t cache_size; + + if (!section || section->bcache) + return -pte_internal; + + if (section->disable_bcache) + return 
0; + + cache_size = (uint32_t) section->size; + + /* We do not allocate a cache if it would get too big. + * + * We also do not treat failure to allocate a cache as an error. + * Without the cache, decode will be slower but still correct. + */ + if (cache_size == section->size) + section->bcache = pt_bcache_alloc(cache_size); + + return 0; +} + +int pt_section_unmap(struct pt_section *section) +{ + uint16_t mcount; + int errcode, status; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = section->mcount; + + errcode = -pte_nomap; + if (!mcount) + goto out_unlock; + + section->mcount = mcount -= 1; + if (mcount) + return pt_section_unlock(section); + + errcode = -pte_internal; + if (!section->unmap) + goto out_unlock; + + status = section->unmap(section); + + pt_bcache_free(section->bcache); + section->bcache = NULL; + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + return status; + +out_unlock: + (void) pt_section_unlock(section); + return errcode; +} + +int pt_section_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + uint64_t limit, space; + + if (!section) + return -pte_internal; + + if (!section->read) + return -pte_nomap; + + limit = section->size; + if (limit <= offset) + return -pte_nomap; + + /* Truncate if we try to read past the end of the section. */ + space = limit - offset; + if (space < size) + size = (uint16_t) space; + + return section->read(section, buffer, size, offset); +} diff --git a/libipt/src/pt_section_file.c b/libipt/src/pt_section_file.c new file mode 100644 index 0000000..81568a4 --- /dev/null +++ b/libipt/src/pt_section_file.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_section.h" +#include "pt_section_file.h" + +#include "intel-pt.h" + +#include +#include + + +static int fmap_init(struct pt_sec_file_mapping *mapping) +{ + if (!mapping) + return -pte_internal; + + memset(mapping, 0, sizeof(*mapping)); + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_init(&mapping->lock, mtx_plain); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +static void fmap_fini(struct pt_sec_file_mapping *mapping) +{ + if (!mapping) + return; + + fclose(mapping->file); + +#if defined(FEATURE_THREADS) + + mtx_destroy(&mapping->lock); + +#endif /* defined(FEATURE_THREADS) */ +} + +static int fmap_lock(struct pt_sec_file_mapping *mapping) +{ + if (!mapping) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_lock(&mapping->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +static int fmap_unlock(struct pt_sec_file_mapping *mapping) +{ + if (!mapping) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_unlock(&mapping->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +int pt_sec_file_map(struct pt_section *section, FILE *file) +{ + struct pt_sec_file_mapping *mapping; + uint64_t offset, size; + long begin, end, fsize; + int errcode; + + if (!section) + return -pte_internal; + + mapping = section->mapping; + if (mapping) + return -pte_internal; + + offset = section->offset; + size = section->size; + + begin = (long) offset; + end = begin + (long) size; + + /* Check for overflows. */ + if ((uint64_t) begin != offset) + return -pte_bad_image; + + if ((uint64_t) end != (offset + size)) + return -pte_bad_image; + + if (end < begin) + return -pte_bad_image; + + /* Validate that the section lies within the file. */ + errcode = fseek(file, 0, SEEK_END); + if (errcode) + return -pte_bad_image; + + fsize = ftell(file); + if (fsize < 0) + return -pte_bad_image; + + if (fsize < end) + return -pte_bad_image; + + mapping = malloc(sizeof(*mapping)); + if (!mapping) + return -pte_nomem; + + errcode = fmap_init(mapping); + if (errcode < 0) { + free(mapping); + return errcode; + } + + mapping->file = file; + mapping->begin = begin; + mapping->end = end; + + section->mapping = mapping; + section->unmap = pt_sec_file_unmap; + section->read = pt_sec_file_read; + + return pt_section_add_bcache(section); +} + +int pt_sec_file_unmap(struct pt_section *section) +{ + struct pt_sec_file_mapping *mapping; + + if (!section) + return -pte_internal; + + mapping = section->mapping; + + if (!mapping || !section->unmap || !section->read) + return -pte_internal; + + section->mapping = NULL; + section->unmap = NULL; + section->read = NULL; + + fmap_fini(mapping); + free(mapping); + + return 0; +} + +int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + struct pt_sec_file_mapping *mapping; + FILE *file; + long begin; + size_t read; + int errcode; + + if (!buffer || !section) + return -pte_internal; + + mapping = section->mapping; + if (!mapping) + return -pte_internal; + + file = mapping->file; + + /* We already checked in pt_section_read() that the requested memory + * lies within the section's boundaries. + * + * And we checked that the file covers the entire section in + * pt_sec_file_map(). 
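+ * The section's begin and end offsets were shown to fit into a long.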
There's no need to check for overflows, again. + */ + begin = mapping->begin + (long) offset; + + errcode = fmap_lock(mapping); + if (errcode < 0) + return errcode; + + errcode = fseek(file, begin, SEEK_SET); + if (errcode) + goto out_unlock; + + read = fread(buffer, 1, size, file); + + errcode = fmap_unlock(mapping); + if (errcode < 0) + return errcode; + + return (int) read; + +out_unlock: + (void) fmap_unlock(mapping); + return -pte_nomap; +} diff --git a/libipt/src/pt_sync.c b/libipt/src/pt_sync.c new file mode 100644 index 0000000..04a487f --- /dev/null +++ b/libipt/src/pt_sync.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_sync.h" +#include "pt_packet.h" + +#include "intel-pt.h" + + +/* A psb packet contains a unique 2-byte repeating pattern. + * + * There are only two ways to fill up a 64bit work with such a pattern. + */ +const uint64_t psb_pattern[] = { + ((uint64_t) pt_psb_lohi | (uint64_t) pt_psb_lohi << 16 | + (uint64_t) pt_psb_lohi << 32 | (uint64_t) pt_psb_lohi << 48), + ((uint64_t) pt_psb_hilo | (uint64_t) pt_psb_hilo << 16 | + (uint64_t) pt_psb_hilo << 32 | (uint64_t) pt_psb_hilo << 48) +}; + +static const uint8_t *truncate(const uint8_t *pointer, size_t alignment) +{ + uintptr_t raw = (uintptr_t) pointer; + + raw /= alignment; + raw *= alignment; + + return (const uint8_t *) raw; +} + +static const uint8_t *align(const uint8_t *pointer, size_t alignment) +{ + return truncate(pointer + alignment - 1, alignment); +} + +/* Find a psb packet given a position somewhere in the payload. + * + * Return the position of the psb packet. + * Return NULL, if this is not a psb packet. + */ +static const uint8_t *pt_find_psb(const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + int errcode; + + if (!pos || !config) + return NULL; + + begin = config->begin; + end = config->end; + + /* Navigate to the end of the psb payload pattern. + * + * Beware that PSB is an extended opcode. 
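+ * It starts with 02 82, the same two bytes that form its payload pattern.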
We must not confuse the extend + * opcode of the following packet as belonging to the PSB. + */ + if (*pos != pt_psb_hi) + pos++; + + for (; (pos + 1) < end; pos += 2) { + uint8_t hi, lo; + + hi = pos[0]; + lo = pos[1]; + + if (hi != pt_psb_hi) + break; + + if (lo != pt_psb_lo) + break; + } + /* + * We're right after the psb payload and within the buffer. + * Navigate to the expected beginning of the psb packet. + */ + pos -= ptps_psb; + + /* Check if we're still inside the buffer. */ + if (pos < begin) + return NULL; + + /* Check that this is indeed a psb packet we're at. */ + if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb) + return NULL; + + errcode = pt_pkt_read_psb(pos, config); + if (errcode < 0) + return NULL; + + return pos; +} + +static int pt_sync_within_bounds(const uint8_t *pos, const uint8_t *begin, + const uint8_t *end) +{ + /* We allow @pos == @end representing the very end of the trace. + * + * This will result in -pte_eos when we actually try to read from @pos. + */ + return (begin <= pos) && (pos <= end); +} + +int pt_sync_set(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + int errcode; + + if (!sync || !pos || !config) + return -pte_internal; + + begin = config->begin; + end = config->end; + + if (!pt_sync_within_bounds(pos, begin, end)) + return -pte_eos; + + if (end < pos + 2) + return -pte_eos; + + /* Check that this is indeed a psb packet we're at. */ + if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb) + return -pte_nosync; + + errcode = pt_pkt_read_psb(pos, config); + if (errcode < 0) + return errcode; + + *sync = pos; + + return 0; +} + +int pt_sync_forward(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + + if (!sync || !pos || !config) + return -pte_internal; + + begin = config->begin; + end = config->end; + + if (!pt_sync_within_bounds(pos, begin, end)) + return -pte_internal; + + /* We search for a full 64bit word. It's OK to skip the current one. */ + pos = align(pos, sizeof(*psb_pattern)); + + /* Search for the psb payload pattern in the buffer. */ + for (;;) { + const uint8_t *current = pos; + uint64_t val; + + pos += sizeof(uint64_t); + if (end < pos) + return -pte_eos; + + val = * (const uint64_t *) current; + + if ((val != psb_pattern[0]) && (val != psb_pattern[1])) + continue; + + /* We found a 64bit word's worth of psb payload pattern. */ + current = pt_find_psb(pos, config); + if (!current) + continue; + + *sync = current; + return 0; + } +} + +int pt_sync_backward(const uint8_t **sync, const uint8_t *pos, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + + if (!sync || !pos || !config) + return -pte_internal; + + begin = config->begin; + end = config->end; + + if (!pt_sync_within_bounds(pos, begin, end)) + return -pte_internal; + + /* We search for a full 64bit word. It's OK to skip the current one. */ + pos = truncate(pos, sizeof(*psb_pattern)); + + /* Search for the psb payload pattern in the buffer. */ + for (;;) { + const uint8_t *next = pos; + uint64_t val; + + pos -= sizeof(uint64_t); + if (pos < begin) + return -pte_eos; + + val = * (const uint64_t *) pos; + + if ((val != psb_pattern[0]) && (val != psb_pattern[1])) + continue; + + /* We found a 64bit word's worth of psb payload pattern. 
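+ * Locate the packet start from this position inside the payload.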
*/ + next = pt_find_psb(next, config); + if (!next) + continue; + + *sync = next; + return 0; + } +} diff --git a/libipt/src/pt_time.c b/libipt/src/pt_time.c new file mode 100644 index 0000000..8c0265c --- /dev/null +++ b/libipt/src/pt_time.c @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_time.h" + +#include "intel-pt.h" + +#include +#include + + +void pt_time_init(struct pt_time *time) +{ + if (!time) + return; + + memset(time, 0, sizeof(*time)); +} + +int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc, + uint32_t *lost_cyc, const struct pt_time *time) +{ + if (!tsc || !time) + return -pte_internal; + + *tsc = time->tsc; + + if (lost_mtc) + *lost_mtc = time->lost_mtc; + if (lost_cyc) + *lost_cyc = time->lost_cyc; + + if (!time->have_tsc) + return -pte_no_time; + + return 0; +} + +int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time) +{ + if (!cbr || !time) + return -pte_internal; + + if (!time->have_cbr) + return -pte_no_cbr; + + *cbr = time->cbr; + + return 0; +} + +/* Compute the distance between two CTC sources. + * + * We adjust a single wrap-around but fail if the distance is bigger than that. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_time_ctc_delta(uint32_t *ctc_delta, uint32_t ctc, + uint32_t last_ctc, const struct pt_config *config) +{ + if (!config || !ctc_delta) + return -pte_internal; + + /* Correct a single wrap-around. If we lost enough MTCs to wrap + * around twice, timing will be wrong until the next TSC. + */ + if (ctc < last_ctc) { + ctc += 1u << (config->mtc_freq + pt_pl_mtc_bit_size); + + /* Since we only store the CTC between TMA/MTC or MTC/TMC a + * single correction should suffice. + */ + if (ctc < last_ctc) + return -pte_bad_packet; + } + + *ctc_delta = ctc - last_ctc; + return 0; +} + +/* Translate CTC into the same unit as the FastCounter by multiplying with P. + * + * Returns zero on success, a negative error code otherwise. 
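+ * P is the TSC/CTC ratio given by CPUID leaf 0x15 as EBX/EAX.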
+ */ +static int pt_time_ctc_fc(uint64_t *fc, uint64_t ctc, + const struct pt_config *config) +{ + uint32_t eax, ebx; + + if (!fc || !config) + return -pte_internal; + + eax = config->cpuid_0x15_eax; + ebx = config->cpuid_0x15_ebx; + + /* Neither multiply nor divide by zero. */ + if (!eax || !ebx) + return -pte_bad_config; + + *fc = (ctc * ebx) / eax; + return 0; +} + +int pt_time_update_tsc(struct pt_time *time, + const struct pt_packet_tsc *packet, + const struct pt_config *config) +{ + (void) config; + + if (!time || !packet) + return -pte_internal; + + time->have_tsc = 1; + time->have_tma = 0; + time->have_mtc = 0; + time->tsc = time->base = packet->tsc; + time->ctc = 0; + time->fc = 0ull; + + /* We got the full time; we recover from previous losses. */ + time->lost_mtc = 0; + time->lost_cyc = 0; + + return 0; +} + +int pt_time_update_cbr(struct pt_time *time, + const struct pt_packet_cbr *packet, + const struct pt_config *config) +{ + (void) config; + + if (!time || !packet) + return -pte_internal; + + time->have_cbr = 1; + time->cbr = packet->ratio; + + return 0; +} + +int pt_time_update_tma(struct pt_time *time, + const struct pt_packet_tma *packet, + const struct pt_config *config) +{ + uint32_t ctc, mtc_freq, mtc_hi, ctc_mask; + uint64_t fc; + + if (!time || !packet || !config) + return -pte_internal; + + /* Without a TSC something is seriously wrong. */ + if (!time->have_tsc) + return -pte_bad_context; + + /* We shouldn't have more than one TMA per TSC. */ + if (time->have_tma) + return -pte_bad_context; + + /* We're ignoring MTC between TSC and TMA. */ + if (time->have_mtc) + return -pte_internal; + + ctc = packet->ctc; + fc = packet->fc; + + mtc_freq = config->mtc_freq; + mtc_hi = mtc_freq + pt_pl_mtc_bit_size; + + /* A mask for the relevant CTC bits ignoring high-order bits that are + * not provided by MTC. + */ + ctc_mask = (1u << mtc_hi) - 1u; + + time->have_tma = 1; + time->base -= fc; + time->fc += fc; + + /* If the MTC frequency is low enough that TMA provides the full CTC + * value, we can use the TMA as an MTC. + * + * If it isn't, we will estimate the preceding MTC based on the CTC bits + * the TMA provides at the next MTC. We forget about the previous MTC + * in this case. + * + * If no MTC packets are dropped around TMA, we will estimate the + * forgotten value again at the next MTC. + * + * If MTC packets are dropped, we can't really tell where in this + * extended MTC period the TSC occurred. The estimation will place it + * right before the next MTC. + */ + if (mtc_hi <= pt_pl_tma_ctc_bit_size) + time->have_mtc = 1; + + /* In both cases, we store the TMA's CTC bits until the next MTC. */ + time->ctc = time->ctc_cyc = ctc & ctc_mask; + + return 0; +} + +int pt_time_update_mtc(struct pt_time *time, + const struct pt_packet_mtc *packet, + const struct pt_config *config) +{ + uint32_t last_ctc, ctc, ctc_delta; + uint64_t tsc, base; + uint8_t mtc_freq; + int errcode, have_tsc, have_tma, have_mtc; + + if (!time || !packet || !config) + return -pte_internal; + + have_tsc = time->have_tsc; + have_tma = time->have_tma; + have_mtc = time->have_mtc; + + /* We ignore MTCs between TSC and TMA to avoid apparent CTC overflows. + * + * Later MTCs will ensure that no time is lost - provided TMA provides + * enough bits. If TMA doesn't provide any of the MTC bits we may place + * the TSC into the wrong MTC period. 
+ */ + if (have_tsc && !have_tma) + return 0; + + base = time->base; + last_ctc = time->ctc; + mtc_freq = config->mtc_freq; + + ctc = packet->ctc << mtc_freq; + + /* Store our CTC value if we have or would have reset FC. */ + if (time->fc || time->lost_cyc || !have_mtc) + time->ctc_cyc = ctc; + + /* Prepare for the next packet in case we error out below. */ + time->have_mtc = 1; + time->fc = 0ull; + time->ctc = ctc; + + /* We recover from previous CYC losses. */ + time->lost_cyc = 0; + + /* Avoid a big jump when we see the first MTC with an arbitrary CTC + * payload. + */ + if (!have_mtc) { + uint32_t ctc_lo, ctc_hi; + + /* If we have not seen a TMA, we ignore this first MTC. + * + * We have no idea where in this MTC period tracing started. + * We could lose an entire MTC period or just a tiny fraction. + * + * On the other hand, if we assumed a previous MTC value, we + * might make just the same error. + */ + if (!have_tma) + return 0; + + /* The TMA's CTC value didn't provide enough bits - otherwise, + * we would have treated the TMA as an MTC. + */ + if (last_ctc & ~pt_pl_tma_ctc_mask) + return -pte_internal; + + /* Split this MTC's CTC value into low and high parts with + * respect to the bits provided by TMA. + */ + ctc_lo = ctc & pt_pl_tma_ctc_mask; + ctc_hi = ctc & ~pt_pl_tma_ctc_mask; + + /* We estimate the high-order CTC bits that are not provided by + * TMA based on the CTC bits provided by this MTC. + * + * We assume that no MTC packets were dropped around TMA. If + * there are, we might place the TSC into the wrong MTC period + * depending on how many CTC bits TMA provides and how many MTC + * packets were dropped. + * + * Note that we may underflow which results in more bits to be + * set than MTC packets may provide. Drop those extra bits. + */ + if (ctc_lo < last_ctc) { + ctc_hi -= 1u << pt_pl_tma_ctc_bit_size; + ctc_hi &= pt_pl_mtc_mask << mtc_freq; + } + + last_ctc |= ctc_hi; + } + + errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config); + if (errcode < 0) { + time->lost_mtc += 1; + return errcode; + } + + errcode = pt_time_ctc_fc(&tsc, ctc_delta, config); + if (errcode < 0) + return errcode; + + base += tsc; + time->tsc = time->base = base; + + return 0; +} + +/* Adjust a CYC packet's payload spanning multiple MTC periods. + * + * CYC packets measure the Fast Counter since the last CYC(-eligible) packet. + * Depending on the CYC threshold, we may not get a CYC for each MTC, so a CYC + * period may overlap with or even span multiple MTC periods. + * + * We can't do much about the overlap case without examining all packets in + * the respective periods. We leave this as expected imprecision. + * + * If we find a CYC packet to span multiple MTC packets, though, we try to + * approximate the portion for the current MTC period by subtracting the + * estimated portion for previous MTC periods using calibration information. + * + * We only consider MTC. For the first CYC after TSC, the corresponding TMA + * will contain the Fast Counter at TSC. + * + * Returns zero on success, a negative error code otherwise. + */ +static int pt_time_adjust_cyc(uint64_t *cyc, const struct pt_time *time, + const struct pt_config *config, uint64_t fcr) +{ + uint32_t last_ctc, ctc, ctc_delta; + uint64_t fc, total_cyc, old_cyc; + int errcode; + + if (!time || !config || !fcr) + return -pte_internal; + + last_ctc = time->ctc_cyc; + ctc = time->ctc; + + /* There is nothing to do if this is the current MTC period. 
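+ * The CYC payload then did not span an MTC boundary.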
*/ + if (ctc == last_ctc) + return 0; + + /* Calibration computes + * + * fc = (ctc_delta * cpuid[0x15].ebx) / cpuid[0x15].eax. + * fcr = (fc << pt_tcal_fcr_shr) / cyc + * + * So cyc = (fc << pt_tcal_fcr_shr) / fcr. + */ + + errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config); + if (errcode < 0) + return errcode; + + errcode = pt_time_ctc_fc(&fc, ctc_delta, config); + if (errcode < 0) + return errcode; + + old_cyc = (fc << pt_tcal_fcr_shr) / fcr; + total_cyc = *cyc; + + /* Make sure we don't wrap around. If we would, attribute the entire + * CYC payload to any previous MTC period. + * + * We lost an unknown portion of the CYC payload for the current MTC + * period, but it's usually better to run too slow than too fast. + */ + if (total_cyc < old_cyc) + total_cyc = old_cyc; + + *cyc = total_cyc - old_cyc; + return 0; +} + +int pt_time_update_cyc(struct pt_time *time, + const struct pt_packet_cyc *packet, + const struct pt_config *config, uint64_t fcr) +{ + uint64_t cyc, fc; + + if (!time || !packet || !config) + return -pte_internal; + + if (!fcr) { + time->lost_cyc += 1; + return 0; + } + + cyc = packet->value; + fc = time->fc; + if (!fc) { + int errcode; + + errcode = pt_time_adjust_cyc(&cyc, time, config, fcr); + if (errcode < 0) + return errcode; + } + + fc += (cyc * fcr) >> pt_tcal_fcr_shr; + + time->fc = fc; + time->tsc = time->base + fc; + + return 0; +} + +void pt_tcal_init(struct pt_time_cal *tcal) +{ + if (!tcal) + return; + + memset(tcal, 0, sizeof(*tcal)); + + tcal->min_fcr = UINT64_MAX; +} + +static int pt_tcal_have_fcr(const struct pt_time_cal *tcal) +{ + if (!tcal) + return 0; + + return (tcal->min_fcr <= tcal->max_fcr); +} + +int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal) +{ + if (!fcr || !tcal) + return -pte_internal; + + if (!pt_tcal_have_fcr(tcal)) + return -pte_no_time; + + *fcr = tcal->fcr; + + return 0; +} + +int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr) +{ + if (!tcal) + return -pte_internal; + + tcal->fcr = fcr; + + if (fcr < tcal->min_fcr) + tcal->min_fcr = fcr; + + if (fcr > tcal->max_fcr) + tcal->max_fcr = fcr; + + return 0; +} + +int pt_tcal_update_tsc(struct pt_time_cal *tcal, + const struct pt_packet_tsc *packet, + const struct pt_config *config) +{ + (void) config; + + if (!tcal || !packet) + return -pte_internal; + + /* A TSC outside of PSB+ may indicate loss of time. We do not use it + * for calibration. We store the TSC value for calibration at the next + * TSC in PSB+, though. + */ + tcal->tsc = packet->tsc; + tcal->cyc_tsc = 0ull; + + return 0; +} + +int pt_tcal_header_tsc(struct pt_time_cal *tcal, + const struct pt_packet_tsc *packet, + const struct pt_config *config) +{ + uint64_t tsc, last_tsc, tsc_delta, cyc, fcr; + + (void) config; + + if (!tcal || !packet) + return -pte_internal; + + last_tsc = tcal->tsc; + cyc = tcal->cyc_tsc; + + tsc = packet->tsc; + + tcal->tsc = tsc; + tcal->cyc_tsc = 0ull; + + if (!last_tsc || !cyc) + return 0; + + /* Correct a single wrap-around. */ + if (tsc < last_tsc) { + tsc += 1ull << pt_pl_tsc_bit_size; + + if (tsc < last_tsc) + return -pte_bad_packet; + } + + tsc_delta = tsc - last_tsc; + + /* We shift the nominator to improve rounding precision. + * + * Since we're only collecting the CYCs between two TSC, we shouldn't + * overflow. Let's rather fail than overflow. 
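+ * The check below rejects a tsc_delta that would lose its top bits in the shift.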
+ */ + if (tsc_delta & ~(~0ull >> pt_tcal_fcr_shr)) + return -pte_internal; + + fcr = (tsc_delta << pt_tcal_fcr_shr) / cyc; + + return pt_tcal_set_fcr(tcal, fcr); +} + +int pt_tcal_update_cbr(struct pt_time_cal *tcal, + const struct pt_packet_cbr *packet, + const struct pt_config *config) +{ + /* A CBR outside of PSB+ indicates a frequency change. Reset our + * calibration state. + */ + pt_tcal_init(tcal); + + return pt_tcal_header_cbr(tcal, packet, config); +} + +int pt_tcal_header_cbr(struct pt_time_cal *tcal, + const struct pt_packet_cbr *packet, + const struct pt_config *config) +{ + uint64_t cbr, p1, fcr; + + if (!tcal || !packet || !config) + return -pte_internal; + + p1 = config->nom_freq; + if (!p1) + return 0; + + /* If we know the nominal frequency, we can use it for calibration. */ + cbr = packet->ratio; + + fcr = (p1 << pt_tcal_fcr_shr) / cbr; + + return pt_tcal_set_fcr(tcal, fcr); +} + +int pt_tcal_update_tma(struct pt_time_cal *tcal, + const struct pt_packet_tma *packet, + const struct pt_config *config) +{ + (void) tcal; + (void) packet; + (void) config; + + /* Nothing to do. */ + return 0; +} + +int pt_tcal_update_mtc(struct pt_time_cal *tcal, + const struct pt_packet_mtc *packet, + const struct pt_config *config) +{ + uint32_t last_ctc, ctc, ctc_delta, have_mtc; + uint64_t cyc, fc, fcr; + int errcode; + + if (!tcal || !packet || !config) + return -pte_internal; + + last_ctc = tcal->ctc; + have_mtc = tcal->have_mtc; + cyc = tcal->cyc_mtc; + + ctc = packet->ctc << config->mtc_freq; + + /* We need at least two MTC (including this). */ + if (!have_mtc) { + tcal->cyc_mtc = 0ull; + tcal->ctc = ctc; + tcal->have_mtc = 1; + + return 0; + } + + /* Without any cycles, we can't calibrate. Try again at the next + * MTC and distribute the cycles over the combined MTC period. + */ + if (!cyc) + return 0; + + /* Prepare for the next packet in case we error out below. */ + tcal->have_mtc = 1; + tcal->cyc_mtc = 0ull; + tcal->ctc = ctc; + + /* Let's pretend we will fail. We'll correct it at the end. */ + tcal->lost_mtc += 1; + + errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config); + if (errcode < 0) + return errcode; + + errcode = pt_time_ctc_fc(&fc, ctc_delta, config); + if (errcode < 0) + return errcode; + + /* We shift the nominator to improve rounding precision. + * + * Since we're only collecting the CYCs between two MTC, we shouldn't + * overflow. Let's rather fail than overflow. + */ + if (fc & ~(~0ull >> pt_tcal_fcr_shr)) + return -pte_internal; + + fcr = (fc << pt_tcal_fcr_shr) / cyc; + + errcode = pt_tcal_set_fcr(tcal, fcr); + if (errcode < 0) + return errcode; + + /* We updated the FCR. This recovers from previous MTC losses. 
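+ * Clear the loss counter we pessimistically incremented above.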
*/ + tcal->lost_mtc = 0; + + return 0; +} + +int pt_tcal_update_cyc(struct pt_time_cal *tcal, + const struct pt_packet_cyc *packet, + const struct pt_config *config) +{ + uint64_t cyc; + + (void) config; + + if (!tcal || !packet) + return -pte_internal; + + cyc = packet->value; + tcal->cyc_mtc += cyc; + tcal->cyc_tsc += cyc; + + return 0; +} diff --git a/libipt/src/pt_tnt_cache.c b/libipt/src/pt_tnt_cache.c new file mode 100644 index 0000000..c2bc99a --- /dev/null +++ b/libipt/src/pt_tnt_cache.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_tnt_cache.h" + +#include "intel-pt.h" + + +void pt_tnt_cache_init(struct pt_tnt_cache *cache) +{ + if (!cache) + return; + + cache->tnt = 0ull; + cache->index = 0ull; +} + +int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache) +{ + if (!cache) + return -pte_invalid; + + return cache->index == 0; +} + +int pt_tnt_cache_query(struct pt_tnt_cache *cache) +{ + int taken; + + if (!cache) + return -pte_invalid; + + if (!cache->index) + return -pte_bad_query; + + taken = (cache->tnt & cache->index) != 0; + cache->index >>= 1; + + return taken; +} + +int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache, + const struct pt_packet_tnt *packet, + const struct pt_config *config) +{ + uint8_t bit_size; + + (void) config; + + if (!cache || !packet) + return -pte_invalid; + + if (cache->index) + return -pte_bad_context; + + bit_size = packet->bit_size; + if (!bit_size) + return -pte_bad_packet; + + cache->tnt = packet->payload; + cache->index = 1ull << (bit_size - 1); + + return 0; +} diff --git a/libipt/src/pt_version.c b/libipt/src/pt_version.c new file mode 100644 index 0000000..514625c --- /dev/null +++ b/libipt/src/pt_version.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "intel-pt.h" + + +struct pt_version pt_library_version() +{ + struct pt_version v = { + /* .major = */ PT_VERSION_MAJOR, + /* .minor = */ PT_VERSION_MINOR, + /* .reserved = */ 0, + /* .build = */ PT_VERSION_BUILD, + /* .ext = */ PT_VERSION_EXT + }; + + return v; +} diff --git a/libipt/src/windows/init.c b/libipt/src/windows/init.c new file mode 100644 index 0000000..a2f283e --- /dev/null +++ b/libipt/src/windows/init.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_ild.h" + +#include + + +BOOLEAN WINAPI DllMain(HINSTANCE handle, DWORD reason, LPVOID reserved) +{ + (void) handle; + (void) reserved; + + switch (reason) { + case DLL_PROCESS_ATTACH: + /* Initialize the Intel(R) Processor Trace instruction + decoder. */ + pt_ild_init(); + break; + + default: + break; + } + + return TRUE; +} diff --git a/libipt/src/windows/pt_cpuid.c b/libipt/src/windows/pt_cpuid.c new file mode 100644 index 0000000..1f9dcee --- /dev/null +++ b/libipt/src/windows/pt_cpuid.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_cpuid.h" + +#include + +extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + int cpu_info[4]; + + __cpuid(cpu_info, leaf); + *eax = cpu_info[0]; + *ebx = cpu_info[1]; + *ecx = cpu_info[2]; + *edx = cpu_info[3]; +} diff --git a/libipt/src/windows/pt_section_windows.c b/libipt/src/windows/pt_section_windows.c new file mode 100644 index 0000000..1ef9135 --- /dev/null +++ b/libipt/src/windows/pt_section_windows.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pt_section.h" +#include "pt_section_windows.h" +#include "pt_section_file.h" + +#include "intel-pt.h" + +#include +#include +#include +#include + + +static int pt_sec_windows_fstat(const char *filename, struct _stat *stat) +{ + int fd, errcode; + + if (!filename || !stat) + return -pte_internal; + + fd = _open(filename, _O_RDONLY); + if (fd == -1) + return -pte_bad_image; + + errcode = _fstat(fd, stat); + + _close(fd); + + if (errcode) + return -pte_bad_image; + + return 0; +} + +int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename) +{ + struct pt_sec_windows_status *status; + struct _stat stat; + int errcode; + + if (!pstatus || !psize) + return -pte_internal; + + errcode = pt_sec_windows_fstat(filename, &stat); + if (errcode < 0) + return errcode; + + if (stat.st_size < 0) + return -pte_bad_image; + + status = malloc(sizeof(*status)); + if (!status) + return -pte_nomem; + + status->stat = stat; + + *pstatus = status; + *psize = stat.st_size; + + return 0; +} + +static int check_file_status(struct pt_section *section, int fd) +{ + struct pt_sec_windows_status *status; + struct _stat stat; + int errcode; + + if (!section) + return -pte_internal; + + errcode = _fstat(fd, &stat); + if (errcode) + return -pte_bad_image; + + status = section->status; + if (!status) + return -pte_internal; + + if (stat.st_size != status->stat.st_size) + return -pte_bad_image; + + if (stat.st_mtime != status->stat.st_mtime) + return -pte_bad_image; + + return 0; +} + +static DWORD granularity(void) +{ + struct _SYSTEM_INFO sysinfo; + + GetSystemInfo(&sysinfo); + + return sysinfo.dwAllocationGranularity; +} + +int pt_sec_windows_map(struct pt_section *section, int fd) +{ + struct pt_sec_windows_mapping *mapping; + uint64_t offset, size, adjustment; + HANDLE fh, mh; + DWORD dsize; + uint8_t *base; + + if (!section) + return -pte_internal; + + offset = section->offset; + size = section->size; + + adjustment = offset % granularity(); + + offset -= adjustment; + size += adjustment; + + /* The section is supposed to fit into the file so we shouldn't + * see any overflows, here. 
+ */ + if (size < section->size) + return -pte_internal; + + dsize = (DWORD) size; + if ((uint64_t) dsize != size) + return -pte_internal; + + fh = (HANDLE) _get_osfhandle(fd); + + mh = CreateFileMapping(fh, NULL, PAGE_READONLY, 0, 0, NULL); + if (!mh) + return -pte_bad_image; + + base = MapViewOfFile(mh, FILE_MAP_READ, (DWORD) (offset >> 32), + (DWORD) (uint32_t) offset, dsize); + if (!base) + goto out_mh; + + mapping = malloc(sizeof(*mapping)); + if (!mapping) + goto out_map; + + mapping->fd = fd; + mapping->mh = mh; + mapping->base = base; + mapping->begin = base + adjustment; + mapping->end = base + size; + + section->mapping = mapping; + section->unmap = pt_sec_windows_unmap; + section->read = pt_sec_windows_read; + + return pt_section_add_bcache(section); + +out_map: + UnmapViewOfFile(base); + +out_mh: + CloseHandle(mh); + return -pte_bad_image; +} + +int pt_section_map(struct pt_section *section) +{ + const char *filename; + uint16_t mcount; + HANDLE fh; + FILE *file; + int fd, errcode; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = section->mcount + 1; + if (mcount > 1) { + section->mcount = mcount; + return pt_section_unlock(section); + } + + if (!mcount) { + errcode = -pte_internal; + goto out_unlock; + } + + if (section->mapping) { + errcode = -pte_internal; + goto out_unlock; + } + + filename = section->filename; + if (!filename) { + errcode = -pte_internal; + goto out_unlock; + } + + fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL, + OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + if (fh == INVALID_HANDLE_VALUE) { + /* We failed to open the file read-only. Let's try to open it + * read-write; maybe our user has the file open for writing. + * + * We will detect changes to the file via fstat(). + */ + + fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_WRITE, NULL, + OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + if (fh == INVALID_HANDLE_VALUE) { + errcode = -pte_bad_image; + goto out_unlock; + } + } + + fd = _open_osfhandle((intptr_t) fh, _O_RDONLY); + if (fd == -1) { + errcode = -pte_bad_image; + goto out_fh; + } + + errcode = check_file_status(section, fd); + if (errcode < 0) { + errcode = -pte_bad_image; + goto out_fd; + } + + /* We leave the file open on success. It will be closed when the + * section is unmapped. + */ + errcode = pt_sec_windows_map(section, fd); + if (!errcode) { + section->mcount = 1; + return pt_section_unlock(section); + } + + /* Fall back to file based sections - report the original error + * if we fail to convert the file descriptor. + */ + file = _fdopen(fd, "rb"); + if (!file) { + errcode = -pte_bad_image; + goto out_fd; + } + + /* We need to keep the file open on success. It will be closed when + * the section is unmapped. 
+ */ + errcode = pt_sec_file_map(section, file); + if (!errcode) { + section->mcount = 1; + return pt_section_unlock(section); + } + + fclose(file); + goto out_unlock; + +out_fd: + _close(fd); + return errcode; + +out_fh: + CloseHandle(fh); + +out_unlock: + (void) pt_section_unlock(section); + return errcode; +} + +int pt_sec_windows_unmap(struct pt_section *section) +{ + struct pt_sec_windows_mapping *mapping; + + if (!section) + return -pte_internal; + + mapping = section->mapping; + if (!mapping || !section->unmap || !section->read) + return -pte_internal; + + section->mapping = NULL; + section->unmap = NULL; + section->read = NULL; + + UnmapViewOfFile(mapping->begin); + CloseHandle(mapping->mh); + _close(mapping->fd); + free(mapping); + + return 0; +} + +int pt_sec_windows_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + struct pt_sec_windows_mapping *mapping; + const uint8_t *begin; + + if (!buffer || !section) + return -pte_internal; + + mapping = section->mapping; + if (!mapping) + return -pte_internal; + + /* We already checked in pt_section_read() that the requested memory + * lies within the section's boundaries. + * + * And we checked that the entire section was mapped. There's no need + * to check for overflows, again. + */ + begin = mapping->begin + offset; + + memcpy(buffer, begin, size); + return (int) size; +} diff --git a/libipt/test/src/ptunit-asid.c b/libipt/test/src/ptunit-asid.c new file mode 100644 index 0000000..8cda032 --- /dev/null +++ b/libipt/test/src/ptunit-asid.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ptunit.h" + +#include "pt_asid.h" + +#include "intel-pt.h" + +#include + + +static struct ptunit_result from_user_null(void) +{ + struct pt_asid user; + int errcode; + + pt_asid_init(&user); + + errcode = pt_asid_from_user(NULL, NULL); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_asid_from_user(NULL, &user); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result from_user_default(void) +{ + struct pt_asid asid; + int errcode; + + errcode = pt_asid_from_user(&asid, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(asid.size, sizeof(asid)); + ptu_uint_eq(asid.cr3, pt_asid_no_cr3); + ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs); + + return ptu_passed(); +} + +static struct ptunit_result from_user_small(void) +{ + struct pt_asid asid, user; + int errcode; + + user.size = sizeof(user.size); + + errcode = pt_asid_from_user(&asid, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(asid.size, sizeof(asid)); + ptu_uint_eq(asid.cr3, pt_asid_no_cr3); + ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs); + + return ptu_passed(); +} + +static struct ptunit_result from_user_big(void) +{ + struct pt_asid asid, user; + int errcode; + + user.size = sizeof(user) + 4; + user.cr3 = 0x4200ull; + user.vmcs = 0x23000ull; + + errcode = pt_asid_from_user(&asid, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(asid.size, sizeof(asid)); + ptu_uint_eq(asid.cr3, 0x4200ull); + ptu_uint_eq(asid.vmcs, 0x23000ull); + + return ptu_passed(); +} + +static struct ptunit_result from_user(void) +{ + struct pt_asid asid, user; + int errcode; + + user.size = sizeof(user); + user.cr3 = 0x4200ull; + user.vmcs = 0x23000ull; + + errcode = pt_asid_from_user(&asid, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(asid.size, sizeof(asid)); + ptu_uint_eq(asid.cr3, 0x4200ull); + ptu_uint_eq(asid.vmcs, 0x23000ull); + + return ptu_passed(); +} + +static struct ptunit_result from_user_cr3(void) +{ + struct pt_asid asid, user; + int errcode; + + user.size = offsetof(struct pt_asid, vmcs); + user.cr3 = 0x4200ull; + user.vmcs = 0x23000ull; + + errcode = pt_asid_from_user(&asid, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(asid.size, sizeof(asid)); + ptu_uint_eq(asid.cr3, 0x4200ull); + ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs); + + return ptu_passed(); +} + +static struct ptunit_result match_null(void) +{ + struct pt_asid asid; + int errcode; + + pt_asid_init(&asid); + + errcode = pt_asid_match(NULL, NULL); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_asid_match(NULL, &asid); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_asid_match(&asid, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result match_default(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + lhs.cr3 = 0x2300ull; + lhs.vmcs = 0x42000ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + errcode = pt_asid_match(&rhs, &lhs); + ptu_int_eq(errcode, 1); + + return ptu_passed(); +} + +static struct ptunit_result match_default_mixed(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + lhs.cr3 = 0x2300ull; + rhs.vmcs = 0x42000ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + errcode = pt_asid_match(&rhs, &lhs); + ptu_int_eq(errcode, 1); + + return ptu_passed(); +} + +static struct ptunit_result 
match_cr3(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + lhs.cr3 = 0x2300ull; + rhs.cr3 = 0x2300ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + return ptu_passed(); +} + +static struct ptunit_result match_vmcs(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + lhs.vmcs = 0x23000ull; + rhs.vmcs = 0x23000ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + return ptu_passed(); +} + +static struct ptunit_result match(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + lhs.cr3 = 0x2300ull; + rhs.cr3 = 0x2300ull; + lhs.vmcs = 0x23000ull; + rhs.vmcs = 0x23000ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 1); + + return ptu_passed(); +} + +static struct ptunit_result match_cr3_false(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + lhs.cr3 = 0x4200ull; + rhs.cr3 = 0x2300ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result match_vmcs_false(void) +{ + struct pt_asid lhs, rhs; + int errcode; + + pt_asid_init(&lhs); + pt_asid_init(&rhs); + + lhs.vmcs = 0x42000ull; + rhs.vmcs = 0x23000ull; + + errcode = pt_asid_match(&lhs, &rhs); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, from_user_null); + ptu_run(suite, from_user_default); + ptu_run(suite, from_user_small); + ptu_run(suite, from_user_big); + ptu_run(suite, from_user); + ptu_run(suite, from_user_cr3); + + ptu_run(suite, match_null); + ptu_run(suite, match_default); + ptu_run(suite, match_default_mixed); + ptu_run(suite, match_cr3); + ptu_run(suite, match_vmcs); + ptu_run(suite, match); + ptu_run(suite, match_cr3_false); + ptu_run(suite, match_vmcs_false); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-block_cache.c b/libipt/test/src/ptunit-block_cache.c new file mode 100644 index 0000000..2a1449a --- /dev/null +++ b/libipt/test/src/ptunit-block_cache.c @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit_threads.h" + +#include "pt_block_cache.h" + +#include + + +/* A test fixture optionally providing a block cache and automatically freeing + * the cache. + */ +struct bcache_fixture { + /* Threading support. */ + struct ptunit_thrd_fixture thrd; + + /* The cache - it will be freed automatically. */ + struct pt_block_cache *bcache; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct bcache_fixture *); + struct ptunit_result (*fini)(struct bcache_fixture *); +}; + +enum { + /* The number of entries in fixture-provided caches. */ + bfix_nentries = 0x10000, + +#if defined(FEATURE_THREADS) + + /* The number of additional threads to use for stress testing. */ + bfix_threads = 3, + +#endif /* defined(FEATURE_THREADS) */ + + /* The number of iterations in stress testing. */ + bfix_iterations = 0x10 +}; + +static struct ptunit_result cfix_init(struct bcache_fixture *bfix) +{ + ptu_test(ptunit_thrd_init, &bfix->thrd); + + bfix->bcache = NULL; + + return ptu_passed(); +} + +static struct ptunit_result bfix_init(struct bcache_fixture *bfix) +{ + ptu_test(cfix_init, bfix); + + bfix->bcache = pt_bcache_alloc(bfix_nentries); + ptu_ptr(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result bfix_fini(struct bcache_fixture *bfix) +{ + int thrd; + + ptu_test(ptunit_thrd_fini, &bfix->thrd); + + for (thrd = 0; thrd < bfix->thrd.nthreads; ++thrd) + ptu_int_eq(bfix->thrd.result[thrd], 0); + + pt_bcache_free(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result bcache_entry_size(void) +{ + ptu_uint_eq(sizeof(struct pt_bcache_entry), sizeof(uint32_t)); + + return ptu_passed(); +} + +static struct ptunit_result bcache_size(void) +{ + ptu_uint_le(sizeof(struct pt_block_cache), + 2 * sizeof(struct pt_bcache_entry)); + + return ptu_passed(); +} + +static struct ptunit_result free_null(void) +{ + pt_bcache_free(NULL); + + return ptu_passed(); +} + +static struct ptunit_result add_null(void) +{ + struct pt_bcache_entry bce; + int errcode; + + memset(&bce, 0, sizeof(bce)); + + errcode = pt_bcache_add(NULL, 0ull, bce); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result lookup_null(void) +{ + struct pt_bcache_entry bce; + struct pt_block_cache bcache; + int errcode; + + errcode = pt_bcache_lookup(&bce, NULL, 0ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_bcache_lookup(NULL, &bcache, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result alloc(struct bcache_fixture *bfix) +{ + bfix->bcache = pt_bcache_alloc(0x10000ull); + ptu_ptr(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result alloc_min(struct bcache_fixture *bfix) +{ + bfix->bcache = pt_bcache_alloc(1ull); + ptu_ptr(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result alloc_too_big(struct bcache_fixture *bfix) +{ + bfix->bcache = pt_bcache_alloc(UINT32_MAX + 1ull); + 
ptu_null(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result alloc_zero(struct bcache_fixture *bfix) +{ + bfix->bcache = pt_bcache_alloc(0ull); + ptu_null(bfix->bcache); + + return ptu_passed(); +} + +static struct ptunit_result initially_empty(struct bcache_fixture *bfix) +{ + uint64_t index; + + for (index = 0; index < bfix_nentries; ++index) { + struct pt_bcache_entry bce; + int status; + + memset(&bce, 0xff, sizeof(bce)); + + status = pt_bcache_lookup(&bce, bfix->bcache, index); + ptu_int_eq(status, 0); + + status = pt_bce_is_valid(bce); + ptu_int_eq(status, 0); + } + + return ptu_passed(); +} + +static struct ptunit_result add_bad_index(struct bcache_fixture *bfix) +{ + struct pt_bcache_entry bce; + int errcode; + + memset(&bce, 0, sizeof(bce)); + + errcode = pt_bcache_add(bfix->bcache, bfix_nentries, bce); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result lookup_bad_index(struct bcache_fixture *bfix) +{ + struct pt_bcache_entry bce; + int errcode; + + errcode = pt_bcache_lookup(&bce, bfix->bcache, bfix_nentries); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result add(struct bcache_fixture *bfix, uint64_t index) +{ + struct pt_bcache_entry bce, exp; + int errcode; + + memset(&bce, 0xff, sizeof(bce)); + memset(&exp, 0x00, sizeof(exp)); + + exp.ninsn = 1; + exp.displacement = 7; + exp.mode = ptem_64bit; + exp.qualifier = ptbq_decode; + exp.isize = 7; + + errcode = pt_bcache_add(bfix->bcache, index, exp); + ptu_int_eq(errcode, 0); + + errcode = pt_bcache_lookup(&bce, bfix->bcache, index); + ptu_int_eq(errcode, 0); + + ptu_uint_eq(bce.ninsn, exp.ninsn); + ptu_int_eq(bce.displacement, exp.displacement); + ptu_uint_eq(pt_bce_exec_mode(bce), pt_bce_exec_mode(exp)); + ptu_uint_eq(pt_bce_qualifier(bce), pt_bce_qualifier(exp)); + ptu_uint_eq(bce.isize, exp.isize); + + return ptu_passed(); +} + +static int worker(void *arg) +{ + struct pt_bcache_entry exp; + struct pt_block_cache *bcache; + uint64_t iter, index; + + bcache = arg; + if (!bcache) + return -pte_internal; + + memset(&exp, 0x00, sizeof(exp)); + exp.ninsn = 5; + exp.displacement = 28; + exp.mode = ptem_64bit; + exp.qualifier = ptbq_again; + exp.isize = 3; + + for (index = 0; index < bfix_nentries; ++index) { + for (iter = 0; iter < bfix_iterations; ++iter) { + struct pt_bcache_entry bce; + int errcode; + + memset(&bce, 0xff, sizeof(bce)); + + errcode = pt_bcache_lookup(&bce, bcache, index); + if (errcode < 0) + return errcode; + + if (!pt_bce_is_valid(bce)) { + errcode = pt_bcache_add(bcache, index, exp); + if (errcode < 0) + return errcode; + } + + errcode = pt_bcache_lookup(&bce, bcache, index); + if (errcode < 0) + return errcode; + + if (!pt_bce_is_valid(bce)) + return -pte_nosync; + + if (bce.ninsn != exp.ninsn) + return -pte_nosync; + + if (bce.displacement != exp.displacement) + return -pte_nosync; + + if (pt_bce_exec_mode(bce) != pt_bce_exec_mode(exp)) + return -pte_nosync; + + if (pt_bce_qualifier(bce) != pt_bce_qualifier(exp)) + return -pte_nosync; + + if (bce.isize != exp.isize) + return -pte_nosync; + } + } + + return 0; +} + +static struct ptunit_result stress(struct bcache_fixture *bfix) +{ + int errcode; + +#if defined(FEATURE_THREADS) + { + int thrd; + + for (thrd = 0; thrd < bfix_threads; ++thrd) + ptu_test(ptunit_thrd_create, &bfix->thrd, worker, + bfix->bcache); + } +#endif /* defined(FEATURE_THREADS) */ + + errcode = worker(bfix->bcache); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +int 
main(int argc, char **argv) +{ + struct bcache_fixture bfix, cfix; + struct ptunit_suite suite; + + bfix.init = bfix_init; + bfix.fini = bfix_fini; + + cfix.init = cfix_init; + cfix.fini = bfix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, bcache_entry_size); + ptu_run(suite, bcache_size); + + ptu_run(suite, free_null); + ptu_run(suite, add_null); + ptu_run(suite, lookup_null); + + ptu_run_f(suite, alloc, cfix); + ptu_run_f(suite, alloc_min, cfix); + ptu_run_f(suite, alloc_too_big, cfix); + ptu_run_f(suite, alloc_zero, cfix); + + ptu_run_f(suite, initially_empty, bfix); + + ptu_run_f(suite, add_bad_index, bfix); + ptu_run_f(suite, lookup_bad_index, bfix); + + ptu_run_fp(suite, add, bfix, 0ull); + ptu_run_fp(suite, add, bfix, bfix_nentries - 1ull); + ptu_run_f(suite, stress, bfix); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-config.c b/libipt/test/src/ptunit-config.c new file mode 100644 index 0000000..433e3fe --- /dev/null +++ b/libipt/test/src/ptunit-config.c @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_config.h" + +#include "intel-pt.h" + +#include + + +/* A global fake buffer to pacify static analyzers. 
*/ +static uint8_t buffer[8]; + +static struct ptunit_result from_user_null(void) +{ + struct pt_config config; + int errcode; + + errcode = pt_config_from_user(NULL, &config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_config_from_user(&config, NULL); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result from_user_too_small(void) +{ + struct pt_config config, user; + int errcode; + + user.size = sizeof(config.size); + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, -pte_bad_config); + + return ptu_passed(); +} + +static struct ptunit_result from_user_bad_buffer(void) +{ + struct pt_config config, user; + int errcode; + + pt_config_init(&user); + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, -pte_bad_config); + + user.begin = buffer; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, -pte_bad_config); + + user.begin = NULL; + user.end = buffer; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, -pte_bad_config); + + user.begin = &buffer[1]; + user.end = buffer; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, -pte_bad_config); + + return ptu_passed(); +} + +static struct ptunit_result from_user(void) +{ + struct pt_config config, user; + int errcode; + + user.size = sizeof(user); + user.begin = buffer; + user.end = &buffer[sizeof(buffer)]; + user.cpu.vendor = pcv_intel; + user.errata.bdm70 = 1; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(config.size, sizeof(config)); + ptu_ptr_eq(config.begin, buffer); + ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]); + ptu_int_eq(config.cpu.vendor, pcv_intel); + ptu_uint_eq(config.errata.bdm70, 1); + + return ptu_passed(); +} + +static struct ptunit_result from_user_small(void) +{ + struct pt_config config, user; + int errcode; + + memset(&config, 0xcd, sizeof(config)); + + user.size = offsetof(struct pt_config, cpu); + user.begin = buffer; + user.end = &buffer[sizeof(buffer)]; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(config.size, offsetof(struct pt_config, cpu)); + ptu_ptr_eq(config.begin, buffer); + ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]); + ptu_int_eq(config.cpu.vendor, pcv_unknown); + ptu_uint_eq(config.errata.bdm70, 0); + + return ptu_passed(); +} + +static struct ptunit_result from_user_big(void) +{ + struct pt_config config, user; + int errcode; + + user.size = sizeof(user) + 4; + user.begin = buffer; + user.end = &buffer[sizeof(buffer)]; + user.cpu.vendor = pcv_intel; + user.errata.bdm70 = 1; + + errcode = pt_config_from_user(&config, &user); + ptu_int_eq(errcode, 0); + ptu_uint_eq(config.size, sizeof(config)); + ptu_ptr_eq(config.begin, buffer); + ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]); + ptu_int_eq(config.cpu.vendor, pcv_intel); + ptu_uint_eq(config.errata.bdm70, 1); + + return ptu_passed(); +} + +static struct ptunit_result size(void) +{ + ptu_uint_eq(sizeof(struct pt_errata), 16 * 4); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, from_user_null); + ptu_run(suite, from_user_too_small); + ptu_run(suite, from_user_bad_buffer); + ptu_run(suite, from_user); + ptu_run(suite, from_user_small); + ptu_run(suite, from_user_big); + ptu_run(suite, size); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-cpp.cpp 
b/libipt/test/src/ptunit-cpp.cpp new file mode 100644 index 0000000..5748c53 --- /dev/null +++ b/libipt/test/src/ptunit-cpp.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "intel-pt.h" + + +static struct ptunit_result init_packet_decoder(void) +{ + uint8_t buf[1]; + struct pt_config config; + struct pt_packet_decoder *decoder; + + pt_config_init(&config); + config.begin = buf; + config.end = buf + sizeof(buf); + + decoder = pt_pkt_alloc_decoder(&config); + ptu_ptr(decoder); + pt_pkt_free_decoder(decoder); + + return ptu_passed(); +} + +static struct ptunit_result init_query_decoder(void) +{ + uint8_t buf[1]; + struct pt_config config; + struct pt_query_decoder *query_decoder; + + pt_config_init(&config); + config.begin = buf; + config.end = buf + sizeof(buf); + + query_decoder = pt_qry_alloc_decoder(&config); + ptu_ptr(query_decoder); + pt_qry_free_decoder(query_decoder); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init_packet_decoder); + ptu_run(suite, init_query_decoder); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-cpu.c b/libipt/test/src/ptunit-cpu.c new file mode 100644 index 0000000..0ad8fdb --- /dev/null +++ b/libipt/test/src/ptunit-cpu.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_cpu.h" + +#include "intel-pt.h" + +#include + + +void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, + uint32_t *edx) +{ + (void) leaf; + (void) eax; + (void) ebx; + (void) ecx; + (void) edx; +} + + +static struct ptunit_result cpu_valid(void) +{ + struct pt_cpu cpu; + int error; + + error = pt_cpu_parse(&cpu, "6/44/2"); + ptu_int_eq(error, 0); + ptu_int_eq(cpu.vendor, pcv_intel); + ptu_uint_eq(cpu.family, 6); + ptu_uint_eq(cpu.model, 44); + ptu_uint_eq(cpu.stepping, 2); + + error = pt_cpu_parse(&cpu, "0xf/0x2c/0xf"); + ptu_int_eq(error, 0); + ptu_int_eq(cpu.vendor, pcv_intel); + ptu_uint_eq(cpu.family, 0xf); + ptu_uint_eq(cpu.model, 0x2c); + ptu_uint_eq(cpu.stepping, 0xf); + + error = pt_cpu_parse(&cpu, "022/054/017"); + ptu_int_eq(error, 0); + ptu_int_eq(cpu.vendor, pcv_intel); + ptu_uint_eq(cpu.family, 022); + ptu_uint_eq(cpu.model, 054); + ptu_uint_eq(cpu.stepping, 017); + + error = pt_cpu_parse(&cpu, "6/44"); + ptu_int_eq(error, 0); + ptu_int_eq(cpu.vendor, pcv_intel); + ptu_uint_eq(cpu.family, 6); + ptu_uint_eq(cpu.model, 44); + ptu_uint_eq(cpu.stepping, 0); + + return ptu_passed(); +} + +static struct ptunit_result cpu_null(void) +{ + struct pt_cpu cpu; + int error; + + error = pt_cpu_parse(&cpu, NULL); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(NULL, ""); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(NULL, NULL); + ptu_int_eq(error, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result cpu_incomplete(void) +{ + struct pt_cpu cpu; + int error; + + error = pt_cpu_parse(&cpu, ""); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6//2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "//"); + ptu_int_eq(error, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result cpu_invalid(void) +{ + struct pt_cpu cpu; + int error; + + error = pt_cpu_parse(&cpu, "e/44/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/e/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/44/e"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "65536/44/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/256/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/44/256"); + ptu_int_eq(error, -pte_invalid); + + error = 
pt_cpu_parse(&cpu, "-1/44/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/-1/2"); + ptu_int_eq(error, -pte_invalid); + + error = pt_cpu_parse(&cpu, "6/44/-1"); + ptu_int_eq(error, -pte_invalid); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, cpu_valid); + ptu_run(suite, cpu_null); + ptu_run(suite, cpu_incomplete); + ptu_run(suite, cpu_invalid); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-event_queue.c b/libipt/test/src/ptunit-event_queue.c new file mode 100644 index 0000000..6958c36 --- /dev/null +++ b/libipt/test/src/ptunit-event_queue.c @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_event_queue.h" + + +/* A test fixture providing an initialized event queue. */ +struct evq_fixture { + /* The event queue. */ + struct pt_event_queue evq; + + /* The test fixture initialization and finalization functions. 
*/ + struct ptunit_result (*init)(struct evq_fixture *); + struct ptunit_result (*fini)(struct evq_fixture *); +}; + + +static struct ptunit_result efix_init(struct evq_fixture *efix) +{ + pt_evq_init(&efix->evq); + + return ptu_passed(); +} + +static struct ptunit_result efix_init_pending(struct evq_fixture *efix) +{ + struct pt_event *ev; + int evb; + + pt_evq_init(&efix->evq); + + for (evb = 0; evb < evb_max; ++evb) { + ev = pt_evq_enqueue(&efix->evq, (enum pt_event_binding) evb); + ptu_ptr(ev); + } + + return ptu_passed(); +} + +static struct ptunit_result standalone_null(void) +{ + struct pt_event *ev; + + ev = pt_evq_standalone(NULL); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result standalone(struct evq_fixture *efix) +{ + struct pt_event *ev; + + ev = pt_evq_standalone(&efix->evq); + ptu_ptr(ev); + ptu_uint_eq(ev->ip_suppressed, 0ul); + ptu_uint_eq(ev->status_update, 0ul); + + return ptu_passed(); +} + +static struct ptunit_result enqueue_null(enum pt_event_binding evb) +{ + struct pt_event *ev; + + ev = pt_evq_enqueue(NULL, evb); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result dequeue_null(enum pt_event_binding evb) +{ + struct pt_event *ev; + + ev = pt_evq_dequeue(NULL, evb); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result dequeue_empty(struct evq_fixture *efix, + enum pt_event_binding evb) +{ + struct pt_event *ev; + + ev = pt_evq_dequeue(&efix->evq, evb); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result evq_empty(struct evq_fixture *efix, + enum pt_event_binding evb) +{ + int status; + + status = pt_evq_empty(&efix->evq, evb); + ptu_int_gt(status, 0); + + status = pt_evq_pending(&efix->evq, evb); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result evq_pending(struct evq_fixture *efix, + enum pt_event_binding evb) +{ + int status; + + status = pt_evq_empty(&efix->evq, evb); + ptu_int_eq(status, 0); + + status = pt_evq_pending(&efix->evq, evb); + ptu_int_gt(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result evq_others_empty(struct evq_fixture *efix, + enum pt_event_binding evb) +{ + int other; + + for (other = 0; other < evb_max; ++other) { + enum pt_event_binding ob; + + ob = (enum pt_event_binding) other; + if (ob != evb) + ptu_test(evq_empty, efix, ob); + } + + return ptu_passed(); +} + +static struct ptunit_result enqueue_all_dequeue(struct evq_fixture *efix, + enum pt_event_binding evb, + size_t num) +{ + struct pt_event *in[evq_max], *out[evq_max]; + size_t idx; + + ptu_uint_le(num, evq_max - 2); + + for (idx = 0; idx < num; ++idx) { + in[idx] = pt_evq_enqueue(&efix->evq, evb); + ptu_ptr(in[idx]); + } + + ptu_test(evq_pending, efix, evb); + ptu_test(evq_others_empty, efix, evb); + + for (idx = 0; idx < num; ++idx) { + out[idx] = pt_evq_dequeue(&efix->evq, evb); + ptu_ptr_eq(out[idx], in[idx]); + } + + ptu_test(evq_empty, efix, evb); + + return ptu_passed(); +} + +static struct ptunit_result enqueue_one_dequeue(struct evq_fixture *efix, + enum pt_event_binding evb, + size_t num) +{ + size_t idx; + + for (idx = 0; idx < num; ++idx) { + struct pt_event *in, *out; + + in = pt_evq_enqueue(&efix->evq, evb); + ptu_ptr(in); + + out = pt_evq_dequeue(&efix->evq, evb); + ptu_ptr_eq(out, in); + } + + return ptu_passed(); +} + +static struct ptunit_result overflow(struct evq_fixture *efix, + enum pt_event_binding evb, + size_t num) +{ + struct pt_event *in[evq_max], *out[evq_max], *ev; + size_t idx; + + ptu_uint_le(num, evq_max - 
2); + + for (idx = 0; idx < (evq_max - 2); ++idx) { + in[idx] = pt_evq_enqueue(&efix->evq, evb); + ptu_ptr(in[idx]); + } + + for (idx = 0; idx < num; ++idx) { + ev = pt_evq_enqueue(&efix->evq, evb); + ptu_null(ev); + } + + for (idx = 0; idx < num; ++idx) { + out[idx] = pt_evq_dequeue(&efix->evq, evb); + ptu_ptr_eq(out[idx], in[idx]); + } + + return ptu_passed(); +} + +static struct ptunit_result clear_null(enum pt_event_binding evb) +{ + int errcode; + + errcode = pt_evq_clear(NULL, evb); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result clear(struct evq_fixture *efix, + enum pt_event_binding evb) +{ + int errcode; + + errcode = pt_evq_clear(&efix->evq, evb); + ptu_int_eq(errcode, 0); + + ptu_test(evq_empty, efix, evb); + + return ptu_passed(); +} + +static struct ptunit_result empty_null(enum pt_event_binding evb) +{ + int errcode; + + errcode = pt_evq_empty(NULL, evb); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result pending_null(enum pt_event_binding evb) +{ + int errcode; + + errcode = pt_evq_pending(NULL, evb); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result find_null(enum pt_event_binding evb, + enum pt_event_type evt) +{ + struct pt_event *ev; + + ev = pt_evq_find(NULL, evb, evt); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result find_empty(struct evq_fixture *efix, + enum pt_event_binding evb, + enum pt_event_type evt) +{ + struct pt_event *ev; + + ev = pt_evq_find(&efix->evq, evb, evt); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result find_none_evb(struct evq_fixture *efix, + enum pt_event_binding evb, + enum pt_event_type evt) +{ + struct pt_event *ev; + size_t other; + + for (other = 0; other < evb_max; ++other) { + enum pt_event_binding ob; + + ob = (enum pt_event_binding) other; + if (ob != evb) { + ev = pt_evq_enqueue(&efix->evq, ob); + ptu_ptr(ev); + + ev->type = evt; + } + } + + ev = pt_evq_find(&efix->evq, evb, evt); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result evq_enqueue_other(struct evq_fixture *efix, + enum pt_event_binding evb, + enum pt_event_type evt, + size_t num) +{ + enum pt_event_type ot; + struct pt_event *ev; + size_t other; + + for (other = 0; other < num; ++other) { + ot = (enum pt_event_type) other; + if (ot != evt) { + ev = pt_evq_enqueue(&efix->evq, evb); + ptu_ptr(ev); + + ev->type = ot; + } + } + + return ptu_passed(); +} + +static struct ptunit_result find_none_evt(struct evq_fixture *efix, + enum pt_event_binding evb, + enum pt_event_type evt, + size_t num) +{ + struct pt_event *ev; + + ptu_test(evq_enqueue_other, efix, evb, evt, num); + + ev = pt_evq_find(&efix->evq, evb, evt); + ptu_null(ev); + + return ptu_passed(); +} + +static struct ptunit_result find(struct evq_fixture *efix, + enum pt_event_binding evb, + enum pt_event_type evt, + size_t before, size_t after) +{ + struct pt_event *in, *out; + + ptu_test(evq_enqueue_other, efix, evb, evt, before); + + in = pt_evq_enqueue(&efix->evq, evb); + ptu_ptr(in); + + in->type = evt; + + ptu_test(evq_enqueue_other, efix, evb, evt, after); + + out = pt_evq_find(&efix->evq, evb, evt); + ptu_ptr_eq(out, in); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct evq_fixture efix, pfix; + struct ptunit_suite suite; + + efix.init = efix_init; + efix.fini = NULL; + + pfix.init = efix_init_pending; + pfix.fini = NULL; + + suite = ptunit_mk_suite(argc, argv); + + 
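+ /* As used throughout this file: ptu_run() runs a plain test,
+  * ptu_run_f() adds a test fixture, ptu_run_p() adds parameters, and
+  * ptu_run_fp() adds both a fixture and parameters.
+  */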
ptu_run(suite, standalone_null); + ptu_run_f(suite, standalone, efix); + + ptu_run_p(suite, enqueue_null, evb_psbend); + ptu_run_p(suite, enqueue_null, evb_tip); + ptu_run_p(suite, enqueue_null, evb_fup); + + ptu_run_p(suite, dequeue_null, evb_psbend); + ptu_run_p(suite, dequeue_null, evb_tip); + ptu_run_p(suite, dequeue_null, evb_fup); + + ptu_run_fp(suite, dequeue_empty, efix, evb_psbend); + ptu_run_fp(suite, dequeue_empty, efix, evb_tip); + ptu_run_fp(suite, dequeue_empty, efix, evb_fup); + + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 1); + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 2); + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 1); + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 3); + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 1); + ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 4); + + ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_psbend, evb_max * 2); + ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_tip, evb_max * 2); + ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_fup, evb_max * 2); + + ptu_run_fp(suite, overflow, efix, evb_psbend, 1); + ptu_run_fp(suite, overflow, efix, evb_tip, 2); + ptu_run_fp(suite, overflow, efix, evb_fup, 3); + + ptu_run_p(suite, clear_null, evb_psbend); + ptu_run_p(suite, clear_null, evb_tip); + ptu_run_p(suite, clear_null, evb_fup); + + ptu_run_fp(suite, clear, efix, evb_psbend); + ptu_run_fp(suite, clear, pfix, evb_psbend); + ptu_run_fp(suite, clear, efix, evb_tip); + ptu_run_fp(suite, clear, pfix, evb_tip); + ptu_run_fp(suite, clear, efix, evb_fup); + ptu_run_fp(suite, clear, pfix, evb_fup); + + ptu_run_p(suite, empty_null, evb_psbend); + ptu_run_p(suite, empty_null, evb_tip); + ptu_run_p(suite, empty_null, evb_fup); + + ptu_run_p(suite, pending_null, evb_psbend); + ptu_run_p(suite, pending_null, evb_tip); + ptu_run_p(suite, pending_null, evb_fup); + + ptu_run_p(suite, find_null, evb_psbend, ptev_enabled); + ptu_run_p(suite, find_null, evb_tip, ptev_disabled); + ptu_run_p(suite, find_null, evb_fup, ptev_paging); + + ptu_run_fp(suite, find_empty, efix, evb_psbend, ptev_enabled); + ptu_run_fp(suite, find_empty, efix, evb_tip, ptev_disabled); + ptu_run_fp(suite, find_empty, efix, evb_fup, ptev_paging); + + ptu_run_fp(suite, find_none_evb, efix, evb_psbend, ptev_enabled); + ptu_run_fp(suite, find_none_evb, efix, evb_tip, ptev_disabled); + ptu_run_fp(suite, find_none_evb, efix, evb_fup, ptev_paging); + + ptu_run_fp(suite, find_none_evt, efix, evb_psbend, ptev_enabled, 3); + ptu_run_fp(suite, find_none_evt, efix, evb_tip, ptev_disabled, 4); + ptu_run_fp(suite, find_none_evt, efix, evb_fup, ptev_paging, 2); + + ptu_run_fp(suite, find, efix, evb_psbend, ptev_enabled, 0, 3); + ptu_run_fp(suite, find, efix, evb_tip, ptev_disabled, 2, 0); + ptu_run_fp(suite, find, efix, evb_fup, ptev_paging, 1, 4); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-fetch.c b/libipt/test/src/ptunit-fetch.c new file mode 100644 index 0000000..2703a74 --- /dev/null +++ b/libipt/test/src/ptunit-fetch.c @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_decoder_function.h" +#include "pt_encoder.h" + +#include "intel-pt.h" + + +/* A test fixture for decoder function fetch tests. */ +struct fetch_fixture { + /* The trace buffer. */ + uint8_t buffer[1024]; + + /* A trace configuration. */ + struct pt_config config; + + /* A trace encoder. */ + struct pt_encoder encoder; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct fetch_fixture *); + struct ptunit_result (*fini)(struct fetch_fixture *); +}; + +static struct ptunit_result ffix_init(struct fetch_fixture *ffix) +{ + memset(ffix->buffer, pt_opc_bad, sizeof(ffix->buffer)); + + memset(&ffix->config, 0, sizeof(ffix->config)); + ffix->config.size = sizeof(ffix->config); + ffix->config.begin = ffix->buffer; + ffix->config.end = ffix->buffer + sizeof(ffix->buffer); + + pt_encoder_init(&ffix->encoder, &ffix->config); + + return ptu_passed(); +} + +static struct ptunit_result ffix_fini(struct fetch_fixture *ffix) +{ + pt_encoder_fini(&ffix->encoder); + + return ptu_passed(); +} + + +static struct ptunit_result fetch_null(struct fetch_fixture *ffix) +{ + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_df_fetch(NULL, ffix->config.begin, &ffix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_df_fetch(&dfun, NULL, &ffix->config); + ptu_int_eq(errcode, -pte_nosync); + + errcode = pt_df_fetch(&dfun, ffix->config.begin, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result fetch_empty(struct fetch_fixture *ffix) +{ + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_df_fetch(&dfun, ffix->config.end, &ffix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result fetch_unknown(struct fetch_fixture *ffix) +{ + const struct pt_decoder_function *dfun; + int errcode; + + ffix->config.begin[0] = pt_opc_bad; + + errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(dfun, &pt_decode_unknown); + + return ptu_passed(); +} + +static struct ptunit_result fetch_unknown_ext(struct fetch_fixture *ffix) +{ + const struct pt_decoder_function *dfun; + int errcode; + + ffix->config.begin[0] = pt_opc_ext; + 
ffix->config.begin[1] = pt_ext_bad; + + errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(dfun, &pt_decode_unknown); + + return ptu_passed(); +} + +static struct ptunit_result fetch_unknown_ext2(struct fetch_fixture *ffix) +{ + const struct pt_decoder_function *dfun; + int errcode; + + ffix->config.begin[0] = pt_opc_ext; + ffix->config.begin[1] = pt_ext_ext2; + ffix->config.begin[2] = pt_ext2_bad; + + errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(dfun, &pt_decode_unknown); + + return ptu_passed(); +} + +static struct ptunit_result fetch_packet(struct fetch_fixture *ffix, + const struct pt_packet *packet, + const struct pt_decoder_function *df) +{ + const struct pt_decoder_function *dfun; + int errcode; + + errcode = pt_enc_next(&ffix->encoder, packet); + ptu_int_ge(errcode, 0); + + errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(dfun, df); + + return ptu_passed(); +} + +static struct ptunit_result fetch_type(struct fetch_fixture *ffix, + enum pt_packet_type type, + const struct pt_decoder_function *dfun) +{ + struct pt_packet packet; + + memset(&packet, 0, sizeof(packet)); + packet.type = type; + + ptu_test(fetch_packet, ffix, &packet, dfun); + + return ptu_passed(); +} + +static struct ptunit_result fetch_tnt_8(struct fetch_fixture *ffix) +{ + struct pt_packet packet; + + memset(&packet, 0, sizeof(packet)); + packet.type = ppt_tnt_8; + packet.payload.tnt.bit_size = 1; + + ptu_test(fetch_packet, ffix, &packet, &pt_decode_tnt_8); + + return ptu_passed(); +} + +static struct ptunit_result fetch_mode_exec(struct fetch_fixture *ffix) +{ + struct pt_packet packet; + + memset(&packet, 0, sizeof(packet)); + packet.type = ppt_mode; + packet.payload.mode.leaf = pt_mol_exec; + + ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode); + + return ptu_passed(); +} + +static struct ptunit_result fetch_mode_tsx(struct fetch_fixture *ffix) +{ + struct pt_packet packet; + + memset(&packet, 0, sizeof(packet)); + packet.type = ppt_mode; + packet.payload.mode.leaf = pt_mol_tsx; + + ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct fetch_fixture ffix; + struct ptunit_suite suite; + + ffix.init = ffix_init; + ffix.fini = ffix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run_f(suite, fetch_null, ffix); + ptu_run_f(suite, fetch_empty, ffix); + + ptu_run_f(suite, fetch_unknown, ffix); + ptu_run_f(suite, fetch_unknown_ext, ffix); + ptu_run_f(suite, fetch_unknown_ext2, ffix); + + ptu_run_fp(suite, fetch_type, ffix, ppt_pad, &pt_decode_pad); + ptu_run_fp(suite, fetch_type, ffix, ppt_psb, &pt_decode_psb); + ptu_run_fp(suite, fetch_type, ffix, ppt_tip, &pt_decode_tip); + ptu_run_fp(suite, fetch_type, ffix, ppt_tnt_64, &pt_decode_tnt_64); + ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pge, &pt_decode_tip_pge); + ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pgd, &pt_decode_tip_pgd); + ptu_run_fp(suite, fetch_type, ffix, ppt_fup, &pt_decode_fup); + ptu_run_fp(suite, fetch_type, ffix, ppt_pip, &pt_decode_pip); + ptu_run_fp(suite, fetch_type, ffix, ppt_ovf, &pt_decode_ovf); + ptu_run_fp(suite, fetch_type, ffix, ppt_psbend, &pt_decode_psbend); + ptu_run_fp(suite, fetch_type, ffix, ppt_tsc, &pt_decode_tsc); + ptu_run_fp(suite, fetch_type, ffix, ppt_cbr, &pt_decode_cbr); + ptu_run_fp(suite, fetch_type, ffix, ppt_tma, &pt_decode_tma); + ptu_run_fp(suite, fetch_type, ffix, 
ppt_mtc, &pt_decode_mtc); + ptu_run_fp(suite, fetch_type, ffix, ppt_cyc, &pt_decode_cyc); + ptu_run_fp(suite, fetch_type, ffix, ppt_stop, &pt_decode_stop); + ptu_run_fp(suite, fetch_type, ffix, ppt_vmcs, &pt_decode_vmcs); + ptu_run_fp(suite, fetch_type, ffix, ppt_mnt, &pt_decode_mnt); + + ptu_run_f(suite, fetch_tnt_8, ffix); + ptu_run_f(suite, fetch_mode_exec, ffix); + ptu_run_f(suite, fetch_mode_tsx, ffix); + + ptunit_report(&suite); + return suite.nr_fails; +} + + +/* Dummy decode functions to satisfy link dependencies. + * + * As a nice side-effect, we will know if we need to add more tests when + * adding new decoder functions. + */ +int pt_pkt_decode_unknown(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_unknown(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_pad(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_pad(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_psb(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_psb(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tip(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tnt_8(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tnt_8(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tnt_64(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tnt_64(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tip_pge(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tip_pge(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tip_pgd(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_fup(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_fup(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_fup(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_pip(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_pip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_pip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_ovf(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_ovf(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_mode(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_mode(struct pt_query_decoder *d) +{ + (void) d; + 
+ return -pte_internal; +} +int pt_qry_header_mode(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_psbend(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_psbend(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tsc(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tsc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_tsc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_cbr(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_cbr(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_cbr(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_tma(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_tma(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_mtc(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_mtc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_cyc(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_cyc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_stop(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_stop(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_vmcs(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_vmcs(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_vmcs(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} + +int pt_pkt_decode_mnt(struct pt_packet_decoder *d, struct pt_packet *p) +{ + (void) d; + (void) p; + + return -pte_internal; +} +int pt_qry_decode_mnt(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} diff --git a/libipt/test/src/ptunit-ild.c b/libipt/test/src/ptunit-ild.c new file mode 100644 index 0000000..d9dd249 --- /dev/null +++ b/libipt/test/src/ptunit-ild.c @@ -0,0 +1,716 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_ild.h" + +#include <string.h> + + +/* Check that an instruction is decoded correctly. */ +static struct ptunit_result ptunit_ild_decode(uint8_t *raw, uint8_t size, + enum pt_exec_mode mode) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + memcpy(insn.raw, raw, size); + insn.size = size; + insn.mode = mode; + + errcode = pt_ild_decode(&insn, &iext); + ptu_int_eq(errcode, 0); + + ptu_uint_eq(insn.size, size); + ptu_int_eq(insn.iclass, ptic_other); + ptu_int_eq(iext.iclass, PTI_INST_INVALID); + + return ptu_passed(); +} +
+/* Check that an instruction is decoded and classified correctly. */ +static struct ptunit_result ptunit_ild_classify(uint8_t *raw, uint8_t size, + enum pt_exec_mode mode, + pti_inst_enum_t iclass) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + memcpy(insn.raw, raw, size); + insn.size = size; + insn.mode = mode; + + errcode = pt_ild_decode(&insn, &iext); + ptu_int_eq(errcode, 0); + + ptu_uint_eq(insn.size, size); + ptu_int_eq(iext.iclass, iclass); + + return ptu_passed(); +} +
+/* Check that an invalid instruction is detected correctly. + * + * Note that we intentionally do not detect all invalid instructions. This test + * therefore only covers some that we care about. + */ +static struct ptunit_result ptunit_ild_invalid(uint8_t *raw, uint8_t size, + enum pt_exec_mode mode) +{ + struct pt_insn_ext iext; + struct pt_insn insn; + int errcode; + + memset(&iext, 0, sizeof(iext)); + memset(&insn, 0, sizeof(insn)); + + memcpy(insn.raw, raw, size); + insn.size = size; + insn.mode = mode; + + errcode = pt_ild_decode(&insn, &iext); + ptu_int_eq(errcode, -pte_bad_insn); + + return ptu_passed(); +} + +
+/* Macros to automatically update the test location. */ +#define ptu_decode(insn, size, mode) \ + ptu_check(ptunit_ild_decode, insn, size, mode) + +#define ptu_classify(insn, size, mode, iclass) \ + ptu_check(ptunit_ild_classify, insn, size, mode, iclass) + +/* Macros to also automatically supply the instruction size. 
*/ +#define ptu_decode_s(insn, mode) \ + ptu_decode(insn, sizeof(insn), mode) + +#define ptu_classify_s(insn, mode, iclass) \ + ptu_classify(insn, sizeof(insn), mode, iclass) + +#define ptu_invalid_s(insn, mode) \ + ptu_check(ptunit_ild_invalid, insn, sizeof(insn), mode) + + +static struct ptunit_result push(void) +{ + uint8_t insn[] = { 0x68, 0x11, 0x22, 0x33, 0x44 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result jmp_rel(void) +{ + uint8_t insn[] = { 0xE9, 0x60, 0xF9, 0xFF, 0xFF }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_E9); + + return ptu_passed(); +} + +static struct ptunit_result long_nop(void) +{ + uint8_t insn[] = { 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0X2E, 0X0F, + 0X1F, 0x84, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_64(void) +{ + uint8_t insn[] = { 0x48, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0x11 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_32_em64(void) +{ + uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0X11 }; + + ptu_decode(insn, 6, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_32(void) +{ + uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }; + + ptu_decode(insn, 5, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_32_em16(void) +{ + uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }; + + ptu_decode(insn, 6, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_16_em32(void) +{ + uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }; + + ptu_decode(insn, 4, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_al_16(void) +{ + uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee }; + + ptu_decode(insn, 3, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result rdtsc(void) +{ + uint8_t insn[] = { 0x0f, 0x31 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result pcmpistri(void) +{ + uint8_t insn[] = { 0x66, 0x0f, 0x3a, 0x63, 0x04, 0x16, 0x1a }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result vmovdqa(void) +{ + uint8_t insn[] = { 0xc5, 0xf9, 0x6f, 0x25, 0xa9, 0x55, 0x04, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result vpandn(void) +{ + uint8_t insn[] = { 0xc4, 0x41, 0x29, 0xdf, 0xd1 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result syscall(void) +{ + uint8_t insn[] = { 0x0f, 0x05 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSCALL); + + return ptu_passed(); +} + +static struct ptunit_result sysret(void) +{ + uint8_t insn[] = { 0x0f, 0x07 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSRET); + + return ptu_passed(); +} + +static struct ptunit_result sysenter(void) +{ + uint8_t insn[] = { 0x0f, 0x34 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSENTER); + + return ptu_passed(); +} + +static struct ptunit_result sysexit(void) +{ + uint8_t insn[] = { 0x0f, 0x35 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSEXIT); + + return ptu_passed(); +} + +static struct ptunit_result int3(void) +{ + uint8_t insn[] = { 0xcc }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_INT3); + + return ptu_passed(); +} + +static struct ptunit_result intn(void) +{ + uint8_t insn[] 
= { 0xcd, 0x06 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_INT); + + return ptu_passed(); +} + +static struct ptunit_result iret(void) +{ + uint8_t insn[] = { 0xcf }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_IRET); + + return ptu_passed(); +} + +static struct ptunit_result call_9a_cd(void) +{ + uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_16bit, PTI_INST_CALL_9A); + + return ptu_passed(); +} + +static struct ptunit_result call_9a_cp(void) +{ + uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_32bit, PTI_INST_CALL_9A); + + return ptu_passed(); +} + +static struct ptunit_result call_ff_3(void) +{ + uint8_t insn[] = { 0xff, 0x1c, 0x25, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_CALL_FFr3); + + return ptu_passed(); +} + +static struct ptunit_result jmp_ff_5(void) +{ + uint8_t insn[] = { 0xff, 0x2c, 0x25, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_FFr5); + + return ptu_passed(); +} + +static struct ptunit_result jmp_ea_cd(void) +{ + uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_16bit, PTI_INST_JMP_EA); + + return ptu_passed(); +} + +static struct ptunit_result jmp_ea_cp(void) +{ + uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_32bit, PTI_INST_JMP_EA); + + return ptu_passed(); +} + +static struct ptunit_result ret_ca(void) +{ + uint8_t insn[] = { 0xca, 0x00, 0x00 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_RET_CA); + + return ptu_passed(); +} + +static struct ptunit_result vmlaunch(void) +{ + uint8_t insn[] = { 0x0f, 0x01, 0xc2 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_VMLAUNCH); + + return ptu_passed(); +} + +static struct ptunit_result vmresume(void) +{ + uint8_t insn[] = { 0x0f, 0x01, 0xc3 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_VMRESUME); + + return ptu_passed(); +} + +static struct ptunit_result vmcall(void) +{ + uint8_t insn[] = { 0x0f, 0x01, 0xc1 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_VMCALL); + + return ptu_passed(); +} + +static struct ptunit_result vmptrld(void) +{ + uint8_t insn[] = { 0x0f, 0xc7, 0x30 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_VMPTRLD); + + return ptu_passed(); +} + +static struct ptunit_result jrcxz(void) +{ + uint8_t insn[] = { 0xe3, 0x00 }; + + ptu_classify_s(insn, ptem_64bit, PTI_INST_JrCXZ); + + return ptu_passed(); +} + +static struct ptunit_result mov_eax_moffs64(void) +{ + uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_eax_moffs64_32(void) +{ + uint8_t insn[] = { 0x67, 0xa1, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_rax_moffs64(void) +{ + uint8_t insn[] = { 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_rax_moffs64_32(void) +{ + uint8_t insn[] = { 0x67, 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_ax_moffs64(void) +{ + uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_ax_moffs64_32(void) +{ + uint8_t insn[] = { 0x67, 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 }; 
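+ /* 0xa1 loads the accumulator from a memory offset (moffs); the 0x66 prefix selects the 16-bit ax operand and 0x67 shrinks the moffs to 32 bits. */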
+ + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_eax_moffs32(void) +{ + uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_ax_moffs32(void) +{ + uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result mov_ax_moffs16(void) +{ + uint8_t insn[] = { 0xa1, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result les(void) +{ + uint8_t insn[] = { 0xc4, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result les_disp16(void) +{ + uint8_t insn[] = { 0xc4, 0x06, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result les_disp32(void) +{ + uint8_t insn[] = { 0xc4, 0x05, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result les_ind_disp8(void) +{ + uint8_t insn[] = { 0xc4, 0x40, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result les_ind_disp16(void) +{ + uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result les_ind_disp32(void) +{ + uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result lds(void) +{ + uint8_t insn[] = { 0xc5, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result lds_disp16(void) +{ + uint8_t insn[] = { 0xc5, 0x06, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result lds_disp32(void) +{ + uint8_t insn[] = { 0xc5, 0x05, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result lds_ind_disp8(void) +{ + uint8_t insn[] = { 0xc5, 0x40, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result lds_ind_disp16(void) +{ + uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result lds_ind_disp32(void) +{ + uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_32bit); + + return ptu_passed(); +} + +static struct ptunit_result vpshufb(void) +{ + uint8_t insn[] = { 0x62, 0x02, 0x05, 0x00, 0x00, 0x00 }; + + ptu_decode_s(insn, ptem_64bit); + + return ptu_passed(); +} + +static struct ptunit_result bound(void) +{ + uint8_t insn[] = { 0x62, 0x02 }; + + ptu_decode_s(insn, ptem_32bit); + ptu_decode_s(insn, ptem_16bit); + + return ptu_passed(); +} + +static struct ptunit_result evex_cutoff(void) +{ + uint8_t insn[] = { 0x62 }; + + ptu_invalid_s(insn, ptem_64bit); + ptu_invalid_s(insn, ptem_32bit); + ptu_invalid_s(insn, ptem_16bit); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + pt_ild_init(); + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, push); + ptu_run(suite, jmp_rel); + ptu_run(suite, long_nop); + ptu_run(suite, mov_al_64); + ptu_run(suite, mov_al_32); + ptu_run(suite, mov_al_32_em64); + 
ptu_run(suite, mov_al_32_em16); + ptu_run(suite, mov_al_16_em32); + ptu_run(suite, mov_al_16); + ptu_run(suite, rdtsc); + ptu_run(suite, pcmpistri); + ptu_run(suite, vmovdqa); + ptu_run(suite, vpandn); + ptu_run(suite, syscall); + ptu_run(suite, sysret); + ptu_run(suite, sysenter); + ptu_run(suite, sysexit); + ptu_run(suite, int3); + ptu_run(suite, intn); + ptu_run(suite, iret); + ptu_run(suite, call_9a_cd); + ptu_run(suite, call_9a_cp); + ptu_run(suite, call_ff_3); + ptu_run(suite, jmp_ff_5); + ptu_run(suite, jmp_ea_cd); + ptu_run(suite, jmp_ea_cp); + ptu_run(suite, ret_ca); + ptu_run(suite, vmlaunch); + ptu_run(suite, vmresume); + ptu_run(suite, vmcall); + ptu_run(suite, vmptrld); + ptu_run(suite, jrcxz); + ptu_run(suite, mov_eax_moffs64); + ptu_run(suite, mov_eax_moffs64_32); + ptu_run(suite, mov_rax_moffs64); + ptu_run(suite, mov_rax_moffs64_32); + ptu_run(suite, mov_ax_moffs64); + ptu_run(suite, mov_ax_moffs64_32); + ptu_run(suite, mov_eax_moffs32); + ptu_run(suite, mov_ax_moffs32); + ptu_run(suite, mov_ax_moffs16); + ptu_run(suite, les); + ptu_run(suite, les_disp16); + ptu_run(suite, les_disp32); + ptu_run(suite, les_ind_disp8); + ptu_run(suite, les_ind_disp16); + ptu_run(suite, les_ind_disp32); + ptu_run(suite, lds); + ptu_run(suite, lds_disp16); + ptu_run(suite, lds_disp32); + ptu_run(suite, lds_ind_disp8); + ptu_run(suite, lds_ind_disp16); + ptu_run(suite, lds_ind_disp32); + ptu_run(suite, vpshufb); + ptu_run(suite, bound); + ptu_run(suite, evex_cutoff); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-image.c b/libipt/test/src/ptunit-image.c new file mode 100644 index 0000000..f83317e --- /dev/null +++ b/libipt/test/src/ptunit-image.c @@ -0,0 +1,2239 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_image.h" +#include "pt_section.h" +#include "pt_mapped_section.h" + +#include "intel-pt.h" + + +struct image_fixture; + +/* A test mapping. */ +struct ifix_mapping { + /* The contents. 
*/ + uint8_t content[0x10]; + + /* The size - between 0 and sizeof(content). */ + uint64_t size; + + /* An artificial error code to be injected into pt_section_read(). + * + * If @errcode is non-zero, pt_section_read() fails with @errcode. + */ + int errcode; +}; + +/* A test file status - turned into a section status. */ +struct ifix_status { + /* Delete indication: + * - zero if initialized and not (yet) deleted + * - non-zero if deleted and not (re-)initialized + */ + int deleted; + + /* Put with use-count of zero indication. */ + int bad_put; + + /* The test mapping to be used. */ + struct ifix_mapping *mapping; + + /* A link back to the test fixture providing this section. */ + struct image_fixture *ifix; +}; + +enum { + ifix_nsecs = 5 +}; + +/* A fake image section cache. */ +struct pt_image_section_cache { + /* The cached sections. */ + struct pt_section *section[ifix_nsecs]; + + /* Their load addresses. */ + uint64_t laddr[ifix_nsecs]; + + /* The number of used sections. */ + int nsecs; +}; + +/* A test fixture providing an image, test sections, and asids. */ +struct image_fixture { + /* The image. */ + struct pt_image image; + + /* The test states. */ + struct ifix_status status[ifix_nsecs]; + + /* The test mappings. */ + struct ifix_mapping mapping[ifix_nsecs]; + + /* The sections. */ + struct pt_section section[ifix_nsecs]; + + /* The asids. */ + struct pt_asid asid[3]; + + /* The number of used sections/mappings/states. */ + int nsecs; + + /* An initially empty image as destination for image copies. */ + struct pt_image copy; + + /* A test section cache. */ + struct pt_image_section_cache iscache; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct image_fixture *); + struct ptunit_result (*fini)(struct image_fixture *); +}; + +static void ifix_init_section(struct pt_section *section, char *filename, + struct ifix_status *status, + struct ifix_mapping *mapping, + struct image_fixture *ifix) +{ + uint8_t i; + + memset(section, 0, sizeof(*section)); + + section->filename = filename; + section->status = status; + section->size = mapping->size = sizeof(mapping->content); + section->offset = 0x10; + + for (i = 0; i < mapping->size; ++i) + mapping->content[i] = i; + + status->deleted = 0; + status->bad_put = 0; + status->mapping = mapping; + status->ifix = ifix; +} + +static int ifix_add_section(struct image_fixture *ifix, char *filename) +{ + int index; + + if (!ifix) + return -pte_internal; + + index = ifix->nsecs; + if (ifix_nsecs <= index) + return -pte_internal; + + ifix_init_section(&ifix->section[index], filename, &ifix->status[index], + &ifix->mapping[index], ifix); + + ifix->nsecs += 1; + return index; +} + +static int ifix_cache_section(struct image_fixture *ifix, + struct pt_section *section, uint64_t laddr) +{ + int index; + + if (!ifix) + return -pte_internal; + + index = ifix->iscache.nsecs; + if (ifix_nsecs <= index) + return -pte_internal; + + ifix->iscache.section[index] = section; + ifix->iscache.laddr[index] = laddr; + + index += 1; + ifix->iscache.nsecs = index; + + return index; +} + +int pt_section_clone(struct pt_section **clone, + const struct pt_section *section, uint64_t offset, + uint64_t size) +{ + struct image_fixture *ifix; + struct ifix_mapping *mapping, *smapping; + struct ifix_status *status; + uint64_t begin, end, sbegin, send, start; + int index; + + if (!clone || !section) + return -pte_internal; + + status = section->status; + if (!status || status->deleted) + return -pte_internal; + + begin 
= offset; + end = begin + size; + + if (end <= begin) + return -pte_bad_image; + + sbegin = pt_section_offset(section); + send = sbegin + pt_section_size(section); + + if (send <= sbegin) + return -pte_internal; + + if ((begin < sbegin) || (send < end)) + return -pte_invalid; + + start = begin - sbegin; + + ifix = status->ifix; + if (!ifix) + return -pte_internal; + + index = ifix_add_section(ifix, section->filename); + if (index < 0) + return index; + + mapping = &ifix->mapping[index]; + mapping->size = size; + + smapping = status->mapping; + if (!smapping) + return -pte_internal; + + memset(mapping->content, 0xcd, sizeof(mapping->content)); + memcpy(mapping->content, &smapping->content[start], (size_t) size); + + ifix->section[index].size = size; + ifix->section[index].offset = offset; + ifix->section[index].ucount = 1; + *clone = &ifix->section[index]; + return 0; +} + +const char *pt_section_filename(const struct pt_section *section) +{ + if (!section) + return NULL; + + return section->filename; +} + +uint64_t pt_section_offset(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->offset; +} + +uint64_t pt_section_size(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->size; +} + +struct pt_section *pt_mk_section(const char *file, uint64_t offset, + uint64_t size) +{ + (void) file; + (void) offset; + (void) size; + + /* This function is not used by our tests. */ + return NULL; +} + +int pt_section_get(struct pt_section *section) +{ + if (!section) + return -pte_internal; + + section->ucount += 1; + return 0; +} + +int pt_section_put(struct pt_section *section) +{ + struct ifix_status *status; + uint16_t ucount; + + if (!section) + return -pte_internal; + + status = section->status; + if (!status) + return -pte_internal; + + ucount = section->ucount; + if (!ucount) { + status->bad_put += 1; + + return -pte_internal; + } + + ucount = --section->ucount; + if (!ucount) { + status->deleted += 1; + + if (status->deleted > 1) + return -pte_internal; + } + + return 0; +} + +int pt_iscache_lookup(struct pt_image_section_cache *iscache, + struct pt_section **section, uint64_t *laddr, int isid) +{ + if (!iscache || !section || !laddr) + return -pte_internal; + + if (!isid || iscache->nsecs < isid) + return -pte_bad_image; + + isid -= 1; + + *section = iscache->section[isid]; + *laddr = iscache->laddr[isid]; + + return pt_section_get(*section); +} + +static int ifix_unmap(struct pt_section *section) +{ + uint16_t mcount; + + if (!section) + return -pte_internal; + + mcount = section->mcount; + if (!mcount) + return -pte_internal; + + if (!section->mapping) + return -pte_internal; + + mcount = --section->mcount; + if (!mcount) + section->mapping = NULL; + + return 0; +} + +static int ifix_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + struct ifix_mapping *mapping; + uint64_t begin, end; + + if (!section || !buffer) + return -pte_internal; + + begin = offset; + end = begin + size; + + if (end < begin) + return -pte_nomap; + + mapping = section->mapping; + if (!mapping) + return -pte_nomap; + + if (mapping->errcode) + return mapping->errcode; + + if (mapping->size <= begin) + return -pte_nomap; + + if (mapping->size < end) { + end = mapping->size; + size = (uint16_t) (end - begin); + } + + memcpy(buffer, &mapping->content[begin], size); + + return size; +} + +int pt_section_map(struct pt_section *section) +{ + struct ifix_status *status; + uint16_t mcount; + + if (!section) + return 
-pte_internal; + + mcount = section->mcount++; + if (mcount) + return 0; + + if (section->mapping) + return -pte_internal; + + status = section->status; + if (!status) + return -pte_internal; + + section->mapping = status->mapping; + section->unmap = ifix_unmap; + section->read = ifix_read; + + return 0; +} + +int pt_section_unmap(struct pt_section *section) +{ + if (!section) + return -pte_internal; + + if (!section->unmap) + return -pte_nomap; + + return section->unmap(section); +} + +int pt_section_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + if (!section) + return -pte_internal; + + if (!section->read) + return -pte_nomap; + + return section->read(section, buffer, size, offset); +} + +/* A test read memory callback. */ +static int image_readmem_callback(uint8_t *buffer, size_t size, + const struct pt_asid *asid, + uint64_t ip, void *context) +{ + const uint8_t *memory; + size_t idx; + + (void) asid; + + if (!buffer) + return -pte_invalid; + + /* We use a constant offset of 0x3000. */ + if (ip < 0x3000ull) + return -pte_nomap; + + ip -= 0x3000ull; + + memory = (const uint8_t *) context; + if (!memory) + return -pte_internal; + + for (idx = 0; idx < size; ++idx) + buffer[idx] = memory[ip + idx]; + + return (int) idx; +} + +static struct ptunit_result init(void) +{ + struct pt_image image; + + memset(&image, 0xcd, sizeof(image)); + + pt_image_init(&image, NULL); + ptu_null(image.name); + ptu_null(image.sections); + ptu_null((void *) (uintptr_t) image.readmem.callback); + ptu_null(image.readmem.context); + + return ptu_passed(); +} + +static struct ptunit_result init_name(struct image_fixture *ifix) +{ + memset(&ifix->image, 0xcd, sizeof(ifix->image)); + + pt_image_init(&ifix->image, "image-name"); + ptu_str_eq(ifix->image.name, "image-name"); + ptu_null(ifix->image.sections); + ptu_null((void *) (uintptr_t) ifix->image.readmem.callback); + ptu_null(ifix->image.readmem.context); + + return ptu_passed(); +} + +static struct ptunit_result init_null(void) +{ + pt_image_init(NULL, NULL); + + return ptu_passed(); +} + +static struct ptunit_result fini(void) +{ + struct ifix_mapping mapping; + struct ifix_status status; + struct pt_section section; + struct pt_image image; + struct pt_asid asid; + int errcode; + + pt_asid_init(&asid); + ifix_init_section(§ion, NULL, &status, &mapping, NULL); + + pt_image_init(&image, NULL); + errcode = pt_image_add(&image, §ion, &asid, 0x0ull, 0); + ptu_int_eq(errcode, 0); + + pt_image_fini(&image); + ptu_int_eq(section.ucount, 0); + ptu_int_eq(section.mcount, 0); + ptu_int_eq(status.deleted, 1); + ptu_int_eq(status.bad_put, 0); + + return ptu_passed(); +} + +static struct ptunit_result fini_empty(void) +{ + struct pt_image image; + + pt_image_init(&image, NULL); + pt_image_fini(&image); + + return ptu_passed(); +} + +static struct ptunit_result fini_null(void) +{ + pt_image_fini(NULL); + + return ptu_passed(); +} + +static struct ptunit_result name(struct image_fixture *ifix) +{ + const char *name; + + pt_image_init(&ifix->image, "image-name"); + + name = pt_image_name(&ifix->image); + ptu_str_eq(name, "image-name"); + + return ptu_passed(); +} + +static struct ptunit_result name_none(void) +{ + struct pt_image image; + const char *name; + + pt_image_init(&image, NULL); + + name = pt_image_name(&image); + ptu_null(name); + + return ptu_passed(); +} + +static struct ptunit_result name_null(void) +{ + const char *name; + + name = pt_image_name(NULL); + ptu_null(name); + + return ptu_passed(); +} + +static struct 
ptunit_result read_empty(struct image_fixture *ifix) +{ + struct pt_asid asid; + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + pt_asid_init(&asid); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &asid, 0x1000ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result overlap_front(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1001ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1010ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + buffer[0] = 0xcc; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x100full); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result overlap_back(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1001ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1010ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result overlap_multiple(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1010ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1008ull, 3); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1007ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x07); + ptu_uint_eq(buffer[1], 0xcc); + + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1008ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1017ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1018ull); + ptu_int_eq(status, 1); + 
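/* The 16-byte section added at 0x1008 ends at 0x1017, so the read at 0x1018 is served by the section at 0x1010. */ +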
ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x08); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result overlap_mid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + ifix->section[1].size = 0x8; + ifix->mapping[1].size = 0x8; + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1004ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1004ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x100bull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x07); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x100cull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x0c); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result contained(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + ifix->section[0].size = 0x8; + ifix->mapping[0].size = 0x8; + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1004ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1008ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x08); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result contained_multiple(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + ifix->section[0].size = 0x2; + ifix->mapping[0].size = 0x2; + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1004ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1008ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull, 3); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1004ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x04); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1008ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x08); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result contained_back(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + ifix->section[0].size = 0x8; + ifix->mapping[0].size = 0x8; + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1004ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x100cull, 2); + ptu_int_eq(status, 0); + + status = 
pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull, 3); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1004ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x04); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x100cull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x0c); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x100full); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1010ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x04); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result same(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1008ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x08); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result same_different_isid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1008ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x08); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result adjacent(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull - ifix->section[1].size, 2); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0], + 0x1000ull + ifix->section[0].size, 3); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0xfffull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], + ifix->mapping[1].content[ifix->mapping[1].size - 1]); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1000ull + ifix->section[0].size); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_null(struct image_fixture *ifix) +{ + uint8_t 
buffer; + int status, isid; + + status = pt_image_read(NULL, &isid, &buffer, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_read(&ifix->image, NULL, &buffer, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_read(&ifix->image, &isid, NULL, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_read(&ifix->image, &isid, &buffer, 1, NULL, + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result read(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1], + 0x1008ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1009ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x09); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[1], + 0x1009ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_bad_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x2003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_null_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, NULL, 0x2003ull); + ptu_int_eq(status, -pte_internal); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_callback(struct image_fixture *ifix) +{ + uint8_t memory[] = { 0xdd, 0x01, 0x02, 0xdd }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_set_callback(&ifix->image, image_readmem_callback, + memory); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x3001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 0); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_nomem(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[1], 0x1010ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result 
read_truncated(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x100full); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x0f); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_error(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc }; + int status, isid; + + ifix->mapping[0].errcode = -pte_nosync; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_nosync); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_spurious_error(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + ifix->mapping[0].errcode = -pte_nosync; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0], + 0x1005ull); + ptu_int_eq(status, -pte_nosync); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x00); + + return ptu_passed(); +} + +static struct ptunit_result remove_section(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove(&ifix->image, &ifix->section[0], + &ifix->asid[0], 0x1000ull); + ptu_int_eq(status, 0); + + ptu_int_ne(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x1003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result remove_bad_vaddr(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove(&ifix->image, &ifix->section[0], + &ifix->asid[0], 0x2000ull); + ptu_int_eq(status, -pte_bad_image); + + ptu_int_eq(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2005ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x05); + 
ptu_uint_eq(buffer[1], 0x06); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result remove_bad_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove(&ifix->image, &ifix->section[0], + &ifix->asid[1], 0x1000ull); + ptu_int_eq(status, -pte_bad_image); + + ptu_int_eq(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2005ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x05); + ptu_uint_eq(buffer[1], 0x06); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result remove_by_filename(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove_by_filename(&ifix->image, + ifix->section[0].filename, + &ifix->asid[0]); + ptu_int_eq(status, 1); + + ptu_int_ne(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x1003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result +remove_by_filename_bad_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove_by_filename(&ifix->image, + ifix->section[0].filename, + &ifix->asid[1]); + ptu_int_eq(status, 0); + + ptu_int_eq(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2005ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x05); + ptu_uint_eq(buffer[1], 0x06); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result 
remove_none_by_filename(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_remove_by_filename(&ifix->image, "bad-name", + &ifix->asid[0]); + ptu_int_eq(status, 0); + + ptu_int_eq(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result remove_all_by_filename(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + ifix->section[0].filename = "same-name"; + ifix->section[1].filename = "same-name"; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x2000ull, 2); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove_by_filename(&ifix->image, "same-name", + &ifix->asid[0]); + ptu_int_eq(status, 2); + + ptu_int_ne(ifix->status[0].deleted, 0); + ptu_int_ne(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x1003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x2003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result remove_by_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1001ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 10); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_image_remove_by_asid(&ifix->image, &ifix->asid[0]); + ptu_int_eq(status, 1); + + ptu_int_ne(ifix->status[0].deleted, 0); + ptu_int_eq(ifix->status[1].deleted, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer), + &ifix->asid[0], 0x1003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0x02); + ptu_uint_eq(buffer[2], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_empty(struct image_fixture *ifix) +{ + struct pt_asid asid; + uint8_t buffer[] = { 0xcc, 
0xcc }; + int status, isid; + + pt_asid_init(&asid); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, sizeof(buffer), + &asid, 0x1000ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_self(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_copy(&ifix->image, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_shrink(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[1], + 0x2000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 11); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_split(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0], + 0x2000ull, 1); + ptu_int_eq(status, 0); + + ifix->section[1].size = 0x7; + ifix->mapping[1].size = 0x7; + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x2001ull, 2); + ptu_int_eq(status, 0); + + ifix->section[2].size = 0x8; + ifix->mapping[2].size = 0x8; + + status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0], + 0x2008ull, 3); + ptu_int_eq(status, 0); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2003ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x02); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2009ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x01); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2000ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x00); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_merge(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + ifix->section[1].size = 0x8; + ifix->mapping[1].size = 0x8; + + status = 
pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0], + 0x2000ull, 1); + ptu_int_eq(status, 0); + + ifix->section[2].size = 0x8; + ifix->mapping[2].size = 0x8; + + status = pt_image_add(&ifix->copy, &ifix->section[2], &ifix->asid[0], + 0x2008ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x2000ull, 3); + ptu_int_eq(status, 0); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2003ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x200aull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x0a); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_overlap(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0], + 0x2000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0], + 0x2010ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0], + 0x2008ull, 3); + ptu_int_eq(status, 0); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2003ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 1); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x200aull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x02); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2016ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 3); + ptu_uint_eq(buffer[0], 0x0e); + ptu_uint_eq(buffer[1], 0xcc); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0], + 0x2019ull); + ptu_int_eq(status, 1); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x09); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result copy_replace(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + ifix->section[0].size = 0x8; + ifix->mapping[0].size = 0x8; + + status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0], + 0x1004ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0], + 0x1000ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_copy(&ifix->copy, &ifix->image); + ptu_int_eq(status, 0); + + isid = -1; + status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(isid, 2); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result add_cached_null(void) +{ + struct pt_image_section_cache iscache; + struct pt_image image; + int status; + + status = pt_image_add_cached(NULL, &iscache, 0, NULL); + ptu_int_eq(status, -pte_invalid); + + status = pt_image_add_cached(&image, NULL, 0, NULL); + ptu_int_eq(status, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result 
add_cached(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid, risid; + + isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull); + ptu_int_gt(isid, 0); + + status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, + &ifix->asid[0]); + ptu_int_eq(status, 0); + + risid = -1; + status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(risid, isid); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result add_cached_null_asid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid, risid; + + isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull); + ptu_int_gt(isid, 0); + + status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, NULL); + ptu_int_eq(status, 0); + + risid = -1; + status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(risid, isid); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result add_cached_twice(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid, risid; + + isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull); + ptu_int_gt(isid, 0); + + status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, + &ifix->asid[0]); + ptu_int_eq(status, 0); + + status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, + &ifix->asid[0]); + ptu_int_eq(status, 0); + + risid = -1; + status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, 2); + ptu_int_eq(risid, isid); + ptu_uint_eq(buffer[0], 0x03); + ptu_uint_eq(buffer[1], 0x04); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result add_cached_bad_isid(struct image_fixture *ifix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + status = pt_image_add_cached(&ifix->image, &ifix->iscache, 1, + &ifix->asid[0]); + ptu_int_eq(status, -pte_bad_image); + + isid = -1; + status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0], + 0x1003ull); + ptu_int_eq(status, -pte_nomap); + ptu_int_eq(isid, -1); + + return ptu_passed(); +} + +static struct ptunit_result find_null(struct image_fixture *ifix) +{ + struct pt_section *section; + uint64_t laddr; + int status; + + status = pt_image_find(NULL, &section, &laddr, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_find(&ifix->image, NULL, &laddr, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_find(&ifix->image, &section, NULL, &ifix->asid[0], + 0x1000ull); + ptu_int_eq(status, -pte_internal); + + status = pt_image_find(&ifix->image, &section, &laddr, NULL, 0x1000ull); + ptu_int_eq(status, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result find(struct image_fixture *ifix) +{ + struct pt_section *section; + uint64_t laddr; + int status; + + status = pt_image_find(&ifix->image, &section, &laddr, &ifix->asid[1], + 0x2003ull); + ptu_int_eq(status, 11); + ptu_ptr_eq(section, &ifix->section[1]); + ptu_uint_eq(laddr, 0x2000ull); + + status = pt_section_put(section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result find_asid(struct image_fixture *ifix) +{ + struct pt_section
*section; + uint64_t laddr; + int status; + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 1); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[1], + 0x1008ull, 2); + ptu_int_eq(status, 0); + + status = pt_image_find(&ifix->image, &section, &laddr, &ifix->asid[0], + 0x1009ull); + ptu_int_eq(status, 1); + ptu_ptr_eq(section, &ifix->section[0]); + ptu_uint_eq(laddr, 0x1000ull); + + status = pt_section_put(section); + ptu_int_eq(status, 0); + + status = pt_image_find(&ifix->image, &section, &laddr, &ifix->asid[1], + 0x1009ull); + ptu_int_eq(status, 2); + ptu_ptr_eq(section, &ifix->section[0]); + ptu_uint_eq(laddr, 0x1008ull); + + status = pt_section_put(section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result find_bad_asid(struct image_fixture *ifix) +{ + struct pt_section *section; + uint64_t laddr; + int status; + + status = pt_image_find(&ifix->image, &section, &laddr, &ifix->asid[0], + 0x2003ull); + ptu_int_eq(status, -pte_nomap); + + return ptu_passed(); +} + +static struct ptunit_result find_nomem(struct image_fixture *ifix) +{ + struct pt_section *section; + uint64_t laddr; + int status; + + status = pt_image_find(&ifix->image, &section, &laddr, &ifix->asid[1], + 0x1010ull); + ptu_int_eq(status, -pte_nomap); + + return ptu_passed(); +} + +static struct ptunit_result validate_null(struct image_fixture *ifix) +{ + int status; + + status = pt_image_validate(NULL, &ifix->asid[0], 0x1004ull, + &ifix->section[0], 0x1000ull, 10); + ptu_int_eq(status, -pte_internal); + + status = pt_image_validate(&ifix->image, NULL, 0x1004ull, + &ifix->section[0], 0x1000ull, 10); + ptu_int_eq(status, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result validate(struct image_fixture *ifix) +{ + int status; + + /* This test depends on the order in which sections are stored when + * added to the image. + * + * Since pt_image_validate() only looks at the top of the LRU stack we + * can only validate that section - i.e. the one that was added first.
+ */ + status = pt_image_validate(&ifix->image, &ifix->asid[0], 0x1004ull, + &ifix->section[0], 0x1000ull, 10); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result validate_bad_asid(struct image_fixture *ifix) +{ + int status; + + status = pt_image_validate(&ifix->image, &ifix->asid[1], 0x1004ull, + &ifix->section[0], 0x1000ull, 10); + ptu_int_eq(status, -pte_nomap); + + return ptu_passed(); +} + +static struct ptunit_result validate_bad_laddr(struct image_fixture *ifix) +{ + int status; + + status = pt_image_validate(&ifix->image, &ifix->asid[0], 0x1004ull, + &ifix->section[0], 0x2000ull, 10); + ptu_int_eq(status, -pte_nomap); + + return ptu_passed(); +} + +static struct ptunit_result validate_bad_isid(struct image_fixture *ifix) +{ + int status; + + status = pt_image_validate(&ifix->image, &ifix->asid[0], 0x1004ull, + &ifix->section[0], 0x1000ull, 11); + ptu_int_eq(status, -pte_nomap); + + return ptu_passed(); +} + +struct ptunit_result ifix_init(struct image_fixture *ifix) +{ + int index; + + pt_image_init(&ifix->image, NULL); + pt_image_init(&ifix->copy, NULL); + + memset(ifix->status, 0, sizeof(ifix->status)); + memset(ifix->mapping, 0, sizeof(ifix->mapping)); + memset(ifix->section, 0, sizeof(ifix->section)); + memset(&ifix->iscache, 0, sizeof(ifix->iscache)); + + ifix->nsecs = 0; + + index = ifix_add_section(ifix, "file-0"); + ptu_int_eq(index, 0); + + index = ifix_add_section(ifix, "file-1"); + ptu_int_eq(index, 1); + + index = ifix_add_section(ifix, "file-2"); + ptu_int_eq(index, 2); + + pt_asid_init(&ifix->asid[0]); + ifix->asid[0].cr3 = 0xa000; + + pt_asid_init(&ifix->asid[1]); + ifix->asid[1].cr3 = 0xb000; + + pt_asid_init(&ifix->asid[2]); + ifix->asid[2].cr3 = 0xc000; + + return ptu_passed(); +} + +struct ptunit_result rfix_init(struct image_fixture *ifix) +{ + int status; + + ptu_check(ifix_init, ifix); + + status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0], + 0x1000ull, 10); + ptu_int_eq(status, 0); + + status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1], + 0x2000ull, 11); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +struct ptunit_result dfix_fini(struct image_fixture *ifix) +{ + pt_image_fini(&ifix->image); + + return ptu_passed(); +} + +struct ptunit_result ifix_fini(struct image_fixture *ifix) +{ + int sec; + + ptu_check(dfix_fini, ifix); + + pt_image_fini(&ifix->copy); + + for (sec = 0; sec < ifix_nsecs; ++sec) { + ptu_int_eq(ifix->section[sec].ucount, 0); + ptu_int_eq(ifix->section[sec].mcount, 0); + ptu_int_le(ifix->status[sec].deleted, 1); + ptu_int_eq(ifix->status[sec].bad_put, 0); + } + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct image_fixture dfix, ifix, rfix; + struct ptunit_suite suite; + + /* Dfix provides image destruction. */ + dfix.init = NULL; + dfix.fini = dfix_fini; + + /* Ifix provides an empty image. */ + ifix.init = ifix_init; + ifix.fini = ifix_fini; + + /* Rfix provides an image with two sections added. 
*/ + rfix.init = rfix_init; + rfix.fini = ifix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init); + ptu_run_f(suite, init_name, dfix); + ptu_run(suite, init_null); + + ptu_run(suite, fini); + ptu_run(suite, fini_empty); + ptu_run(suite, fini_null); + + ptu_run_f(suite, name, dfix); + ptu_run(suite, name_none); + ptu_run(suite, name_null); + + ptu_run_f(suite, read_empty, ifix); + ptu_run_f(suite, overlap_front, ifix); + ptu_run_f(suite, overlap_back, ifix); + ptu_run_f(suite, overlap_multiple, ifix); + ptu_run_f(suite, overlap_mid, ifix); + ptu_run_f(suite, contained, ifix); + ptu_run_f(suite, contained_multiple, ifix); + ptu_run_f(suite, contained_back, ifix); + ptu_run_f(suite, same, ifix); + ptu_run_f(suite, same_different_isid, ifix); + ptu_run_f(suite, adjacent, ifix); + + ptu_run_f(suite, read_null, rfix); + ptu_run_f(suite, read, rfix); + ptu_run_f(suite, read_null, rfix); + ptu_run_f(suite, read_asid, ifix); + ptu_run_f(suite, read_bad_asid, rfix); + ptu_run_f(suite, read_null_asid, rfix); + ptu_run_f(suite, read_callback, rfix); + ptu_run_f(suite, read_nomem, rfix); + ptu_run_f(suite, read_truncated, rfix); + ptu_run_f(suite, read_error, rfix); + ptu_run_f(suite, read_spurious_error, rfix); + + ptu_run_f(suite, remove_section, rfix); + ptu_run_f(suite, remove_bad_vaddr, rfix); + ptu_run_f(suite, remove_bad_asid, rfix); + ptu_run_f(suite, remove_by_filename, rfix); + ptu_run_f(suite, remove_by_filename_bad_asid, rfix); + ptu_run_f(suite, remove_none_by_filename, rfix); + ptu_run_f(suite, remove_all_by_filename, ifix); + ptu_run_f(suite, remove_by_asid, rfix); + + ptu_run_f(suite, copy_empty, ifix); + ptu_run_f(suite, copy, rfix); + ptu_run_f(suite, copy_self, rfix); + ptu_run_f(suite, copy_shrink, rfix); + ptu_run_f(suite, copy_split, ifix); + ptu_run_f(suite, copy_merge, ifix); + ptu_run_f(suite, copy_overlap, ifix); + ptu_run_f(suite, copy_replace, ifix); + + ptu_run(suite, add_cached_null); + ptu_run_f(suite, add_cached, ifix); + ptu_run_f(suite, add_cached_null_asid, ifix); + ptu_run_f(suite, add_cached_twice, ifix); + ptu_run_f(suite, add_cached_bad_isid, ifix); + + ptu_run_f(suite, find_null, rfix); + ptu_run_f(suite, find, rfix); + ptu_run_f(suite, find_asid, ifix); + ptu_run_f(suite, find_bad_asid, rfix); + ptu_run_f(suite, find_nomem, rfix); + + ptu_run_f(suite, validate_null, rfix); + ptu_run_f(suite, validate, rfix); + ptu_run_f(suite, validate_bad_asid, rfix); + ptu_run_f(suite, validate_bad_laddr, rfix); + ptu_run_f(suite, validate_bad_isid, rfix); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-image_section_cache.c b/libipt/test/src/ptunit-image_section_cache.c new file mode 100644 index 0000000..ee7cde9 --- /dev/null +++ b/libipt/test/src/ptunit-image_section_cache.c @@ -0,0 +1,1153 @@ +/* + * Copyright (c) 2016-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_image_section_cache.h" + +#include "ptunit_threads.h" + +#include "intel-pt.h" + +#include <stdlib.h> +#include <string.h> + + +struct pt_section { + /* The filename. We only support string literals for testing. */ + const char *filename; + + /* The file offset and size. */ + uint64_t offset; + uint64_t size; + + /* The file content. */ + uint8_t content[0x10]; + + /* The use count. */ + int ucount; + + /* The map count. */ + int mcount; + +#if defined(FEATURE_THREADS) + /* A lock protecting this section. */ + mtx_t lock; +#endif /* defined(FEATURE_THREADS) */ +}; + +struct pt_section *pt_mk_section(const char *filename, uint64_t offset, + uint64_t size) +{ + struct pt_section *section; + + section = malloc(sizeof(*section)); + if (section) { + uint8_t idx; + + section->filename = filename; + section->offset = offset; + section->size = size; + section->ucount = 1; + section->mcount = 0; + + for (idx = 0; idx < sizeof(section->content); ++idx) + section->content[idx] = idx; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_init(&section->lock, mtx_plain); + if (errcode != thrd_success) { + free(section); + section = NULL; + } + } +#endif /* defined(FEATURE_THREADS) */ + } + + return section; +} + +static int pt_section_lock(struct pt_section *section) +{ + if (!section) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_lock(&section->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +static int pt_section_unlock(struct pt_section *section) +{ + if (!section) + return -pte_internal; + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_unlock(&section->lock); + if (errcode != thrd_success) + return -pte_bad_lock; + } +#endif /* defined(FEATURE_THREADS) */ + + return 0; +} + +int pt_section_get(struct pt_section *section) +{ + int errcode, ucount; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + ucount = ++section->ucount; + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + if (!ucount) + return -pte_internal; + + return 0; +} + +int pt_section_put(struct pt_section *section) +{ + int errcode, ucount; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + ucount = --section->ucount; + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + if (!ucount) { +#if defined(FEATURE_THREADS) +
mtx_destroy(&section->lock); +#endif /* defined(FEATURE_THREADS) */ + free(section); + } + + return 0; +} + +int pt_section_map(struct pt_section *section) +{ + int errcode, mcount; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = ++section->mcount; + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + if (mcount <= 0) + return -pte_internal; + + return 0; +} + +int pt_section_unmap(struct pt_section *section) +{ + int errcode, mcount; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = --section->mcount; + + errcode = pt_section_unlock(section); + if (errcode < 0) + return errcode; + + if (mcount < 0) + return -pte_internal; + + return 0; +} + +const char *pt_section_filename(const struct pt_section *section) +{ + if (!section) + return NULL; + + return section->filename; +} + +uint64_t pt_section_offset(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->offset; +} + +uint64_t pt_section_size(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return section->size; +} + +int pt_section_read(const struct pt_section *section, uint8_t *buffer, + uint16_t size, uint64_t offset) +{ + uint64_t begin, end, max; + + if (!section || !buffer) + return -pte_internal; + + begin = offset; + end = begin + size; + max = sizeof(section->content); + + if (max <= begin) + return -pte_nomap; + + if (max < end) + end = max; + + if (end <= begin) + return -pte_invalid; + + memcpy(buffer, &section->content[begin], (size_t) (end - begin)); + return (int) (end - begin); +} + +enum { + /* The number of test sections. */ + num_sections = 8, + +#if defined(FEATURE_THREADS) + + num_threads = 8, + +#endif /* defined(FEATURE_THREADS) */ + + num_iterations = 0x1000 +}; + +struct iscache_fixture { + /* Threading support. */ + struct ptunit_thrd_fixture thrd; + + /* The image section cache under test. */ + struct pt_image_section_cache iscache; + + /* A bunch of test sections. */ + struct pt_section *section[num_sections]; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct iscache_fixture *); + struct ptunit_result (*fini)(struct iscache_fixture *); +}; + +static struct ptunit_result dfix_init(struct iscache_fixture *cfix) +{ + int idx; + + ptu_test(ptunit_thrd_init, &cfix->thrd); + + memset(cfix->section, 0, sizeof(cfix->section)); + + for (idx = 0; idx < num_sections; ++idx) { + struct pt_section *section; + + section = pt_mk_section("some-filename", + idx % 3 == 0 ? 0x1000 : 0x2000, + idx % 2 == 0 ?
0x1000 : 0x2000); + ptu_ptr(section); + + cfix->section[idx] = section; + } + + return ptu_passed(); +} + +static struct ptunit_result cfix_init(struct iscache_fixture *cfix) +{ + int errcode; + + ptu_test(dfix_init, cfix); + + errcode = pt_iscache_init(&cfix->iscache, NULL); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result cfix_fini(struct iscache_fixture *cfix) +{ + int idx, errcode; + + ptu_test(ptunit_thrd_fini, &cfix->thrd); + + for (idx = 0; idx < cfix->thrd.nthreads; ++idx) + ptu_int_eq(cfix->thrd.result[idx], 0); + + pt_iscache_fini(&cfix->iscache); + + for (idx = 0; idx < num_sections; ++idx) { + ptu_int_eq(cfix->section[idx]->ucount, 1); + ptu_int_eq(cfix->section[idx]->mcount, 0); + + errcode = pt_section_put(cfix->section[idx]); + ptu_int_eq(errcode, 0); + } + + return ptu_passed(); +} + + +static struct ptunit_result init_null(void) +{ + int errcode; + + errcode = pt_iscache_init(NULL, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result fini_null(void) +{ + pt_iscache_fini(NULL); + + return ptu_passed(); +} + +static struct ptunit_result name_null(void) +{ + const char *name; + + name = pt_iscache_name(NULL); + ptu_null(name); + + return ptu_passed(); +} + +static struct ptunit_result add_null(void) +{ + struct pt_image_section_cache iscache; + struct pt_section section; + int errcode; + + errcode = pt_iscache_add(NULL, &section, 0ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_iscache_add(&iscache, NULL, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result find_null(void) +{ + int errcode; + + errcode = pt_iscache_find(NULL, "filename", 0ull, 0ull, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result lookup_null(void) +{ + struct pt_image_section_cache iscache; + struct pt_section *section; + uint64_t laddr; + int errcode; + + errcode = pt_iscache_lookup(NULL, &section, &laddr, 0); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_iscache_lookup(&iscache, NULL, &laddr, 0); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_iscache_lookup(&iscache, &section, NULL, 0); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result clear_null(void) +{ + int errcode; + + errcode = pt_iscache_clear(NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result free_null(void) +{ + pt_iscache_free(NULL); + + return ptu_passed(); +} + +static struct ptunit_result add_file_null(void) +{ + struct pt_image_section_cache iscache; + int errcode; + + errcode = pt_iscache_add_file(NULL, "filename", 0ull, 0ull, 0ull); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_iscache_add_file(&iscache, NULL, 0ull, 0ull, 0ull); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result read_null(void) +{ + struct pt_image_section_cache iscache; + uint8_t buffer; + int errcode; + + errcode = pt_iscache_read(NULL, &buffer, sizeof(buffer), 1ull, 0ull); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_iscache_read(&iscache, NULL, sizeof(buffer), 1ull, 0ull); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_iscache_read(&iscache, &buffer, 0ull, 1, 0ull); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result init_fini(struct iscache_fixture *cfix) +{ + (void) cfix; + + /* The actual init and fini calls are in cfix_init() and cfix_fini().
*/ + return ptu_passed(); +} + +static struct ptunit_result name(struct iscache_fixture *cfix) +{ + const char *name; + + pt_iscache_init(&cfix->iscache, "iscache-name"); + + name = pt_iscache_name(&cfix->iscache); + ptu_str_eq(name, "iscache-name"); + + return ptu_passed(); +} + +static struct ptunit_result name_none(struct iscache_fixture *cfix) +{ + const char *name; + + pt_iscache_init(&cfix->iscache, NULL); + + name = pt_iscache_name(&cfix->iscache); + ptu_null(name); + + return ptu_passed(); +} + +static struct ptunit_result add(struct iscache_fixture *cfix) +{ + int isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid, 0); + + /* The cache gets a reference on success. */ + ptu_int_eq(cfix->section[0]->ucount, 2); + + /* The added section must be implicitly put in pt_iscache_fini. */ + return ptu_passed(); +} + +static struct ptunit_result add_no_name(struct iscache_fixture *cfix) +{ + struct pt_section section; + int errcode; + + memset(§ion, 0, sizeof(section)); + + errcode = pt_iscache_add(&cfix->iscache, §ion, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result add_file(struct iscache_fixture *cfix) +{ + int isid; + + isid = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull); + ptu_int_gt(isid, 0); + + return ptu_passed(); +} + +static struct ptunit_result find(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + found = pt_iscache_find(&cfix->iscache, section->filename, + section->offset, section->size, 0ull); + ptu_int_eq(found, isid); + + return ptu_passed(); +} + +static struct ptunit_result find_empty(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found; + + section = cfix->section[0]; + ptu_ptr(section); + + found = pt_iscache_find(&cfix->iscache, section->filename, + section->offset, section->size, 0ull); + ptu_int_eq(found, 0); + + return ptu_passed(); +} + +static struct ptunit_result find_bad_filename(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + found = pt_iscache_find(&cfix->iscache, "bad-filename", + section->offset, section->size, 0ull); + ptu_int_eq(found, 0); + + return ptu_passed(); +} + +static struct ptunit_result find_null_filename(struct iscache_fixture *cfix) +{ + int errcode; + + errcode = pt_iscache_find(&cfix->iscache, NULL, 0ull, 0ull, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result find_bad_offset(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + found = pt_iscache_find(&cfix->iscache, section->filename, 0ull, + section->size, 0ull); + ptu_int_eq(found, 0); + + return ptu_passed(); +} + +static struct ptunit_result find_bad_size(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + found = pt_iscache_find(&cfix->iscache, section->filename, + section->offset, 0ull, 0ull); + ptu_int_eq(found, 0); + + return 
ptu_passed(); +} + +static struct ptunit_result find_bad_laddr(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + found = pt_iscache_find(&cfix->iscache, section->filename, + section->offset, section->size, 1ull); + ptu_int_eq(found, 0); + + return ptu_passed(); +} + +static struct ptunit_result lookup(struct iscache_fixture *cfix) +{ + struct pt_section *section; + uint64_t laddr; + int errcode, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid, 0); + + errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(section, cfix->section[0]); + ptu_uint_eq(laddr, 0ull); + + errcode = pt_section_put(section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result lookup_bad_isid(struct iscache_fixture *cfix) +{ + struct pt_section *section; + uint64_t laddr; + int errcode, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid, 0); + + errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, 0); + ptu_int_eq(errcode, -pte_bad_image); + + errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, -isid); + ptu_int_eq(errcode, -pte_bad_image); + + errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid + 1); + ptu_int_eq(errcode, -pte_bad_image); + + return ptu_passed(); +} + +static struct ptunit_result clear_empty(struct iscache_fixture *cfix) +{ + int errcode; + + errcode = pt_iscache_clear(&cfix->iscache); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result clear_find(struct iscache_fixture *cfix) +{ + struct pt_section *section; + int errcode, found, isid; + + section = cfix->section[0]; + ptu_ptr(section); + + isid = pt_iscache_add(&cfix->iscache, section, 0ull); + ptu_int_gt(isid, 0); + + errcode = pt_iscache_clear(&cfix->iscache); + ptu_int_eq(errcode, 0); + + + found = pt_iscache_find(&cfix->iscache, section->filename, + section->offset, section->size, 0ull); + ptu_int_eq(found, 0); + + return ptu_passed(); +} + +static struct ptunit_result clear_lookup(struct iscache_fixture *cfix) +{ + struct pt_section *section; + uint64_t laddr; + int errcode, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid, 0); + + errcode = pt_iscache_clear(&cfix->iscache); + ptu_int_eq(errcode, 0); + + errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid); + ptu_int_eq(errcode, -pte_bad_image); + + return ptu_passed(); +} + +static struct ptunit_result add_twice(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[1], 0); + + /* The second add should be ignored. */ + ptu_int_eq(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result add_same(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[0], 0); + + cfix->section[1]->offset = cfix->section[0]->offset; + cfix->section[1]->size = cfix->section[0]->size; + + isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull); + ptu_int_gt(isid[1], 0); + + /* The second add should be ignored.
*/ + ptu_int_eq(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result +add_twice_different_laddr(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 1ull); + ptu_int_gt(isid[1], 0); + + /* We must get different identifiers. */ + ptu_int_ne(isid[1], isid[0]); + + /* We must take two references - one for each entry. */ + ptu_int_eq(cfix->section[0]->ucount, 3); + + return ptu_passed(); +} + +static struct ptunit_result +add_same_different_laddr(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[0], 0); + + cfix->section[1]->offset = cfix->section[0]->offset; + cfix->section[1]->size = cfix->section[0]->size; + + isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 1ull); + ptu_int_gt(isid[1], 0); + + /* We must get different identifiers. */ + ptu_int_ne(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result +add_different_same_laddr(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull); + ptu_int_gt(isid[1], 0); + + /* We must get different identifiers. */ + ptu_int_ne(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result add_file_same(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull); + ptu_int_gt(isid[1], 0); + + /* The second add should be ignored. */ + ptu_int_eq(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result +add_file_same_different_laddr(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 1ull); + ptu_int_gt(isid[1], 0); + + /* We must get different identifiers. */ + ptu_int_ne(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result +add_file_different_same_laddr(struct iscache_fixture *cfix) +{ + int isid[2]; + + isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull); + ptu_int_gt(isid[0], 0); + + isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 1ull, 1ull, 0ull); + ptu_int_gt(isid[1], 0); + + /* We must get different identifiers. 
*/ + ptu_int_ne(isid[1], isid[0]); + + return ptu_passed(); +} + +static struct ptunit_result read(struct iscache_fixture *cfix) +{ + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull); + ptu_int_gt(isid, 0); + + status = pt_iscache_read(&cfix->iscache, buffer, 2ull, isid, 0xa008ull); + ptu_int_eq(status, 2); + ptu_uint_eq(buffer[0], 0x8); + ptu_uint_eq(buffer[1], 0x9); + ptu_uint_eq(buffer[2], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_truncate(struct iscache_fixture *cfix) +{ + uint8_t buffer[] = { 0xcc, 0xcc }; + int status, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull); + ptu_int_gt(isid, 0); + + status = pt_iscache_read(&cfix->iscache, buffer, sizeof(buffer), isid, + 0xa00full); + ptu_int_eq(status, 1); + ptu_uint_eq(buffer[0], 0xf); + ptu_uint_eq(buffer[1], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_bad_vaddr(struct iscache_fixture *cfix) +{ + uint8_t buffer[] = { 0xcc }; + int status, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull); + ptu_int_gt(isid, 0); + + status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid, 0xb000ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + + return ptu_passed(); +} + +static struct ptunit_result read_bad_isid(struct iscache_fixture *cfix) +{ + uint8_t buffer[] = { 0xcc }; + int status, isid; + + isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull); + ptu_int_gt(isid, 0); + + status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid + 1, + 0xa000ull); + ptu_int_eq(status, -pte_bad_image); + ptu_uint_eq(buffer[0], 0xcc); + + return ptu_passed(); +} + +static int worker_add(void *arg) +{ + struct iscache_fixture *cfix; + int it; + + cfix = arg; + if (!cfix) + return -pte_internal; + + for (it = 0; it < num_iterations; ++it) { + uint64_t laddr; + int sec; + + laddr = 0x1000ull * (it % 23); + + for (sec = 0; sec < num_sections; ++sec) { + struct pt_section *section; + uint64_t addr; + int isid, errcode; + + isid = pt_iscache_add(&cfix->iscache, + cfix->section[sec], laddr); + if (isid < 0) + return isid; + + errcode = pt_iscache_lookup(&cfix->iscache, §ion, + &addr, isid); + if (errcode < 0) + return errcode; + + if (laddr != addr) + return -pte_noip; + + /* We may not get the image we added but the image we + * get must have similar attributes. + * + * We're using the same filename string literal for all + * sections, though. + */ + if (section->offset != cfix->section[sec]->offset) + return -pte_bad_image; + + if (section->size != cfix->section[sec]->size) + return -pte_bad_image; + + errcode = pt_section_put(section); + if (errcode < 0) + return errcode; + } + } + + return 0; +} + +static int worker_add_file(void *arg) +{ + struct iscache_fixture *cfix; + int it; + + cfix = arg; + if (!cfix) + return -pte_internal; + + for (it = 0; it < num_iterations; ++it) { + uint64_t offset, size, laddr; + int sec; + + offset = num_iterations % 7 == 0 ? 0x1000 : 0x2000; + size = num_iterations % 5 == 0 ? 0x1000 : 0x2000; + laddr = num_iterations % 3 == 0 ? 
0x1000 : 0x2000; + + for (sec = 0; sec < num_sections; ++sec) { + struct pt_section *section; + uint64_t addr; + int isid, errcode; + + isid = pt_iscache_add_file(&cfix->iscache, "name", + offset, size, laddr); + if (isid < 0) + return isid; + + errcode = pt_iscache_lookup(&cfix->iscache, §ion, + &addr, isid); + if (errcode < 0) + return errcode; + + if (laddr != addr) + return -pte_noip; + + if (section->offset != offset) + return -pte_bad_image; + + if (section->size != size) + return -pte_bad_image; + + errcode = pt_section_put(section); + if (errcode < 0) + return errcode; + } + } + + return 0; +} + +static struct ptunit_result stress(struct iscache_fixture *cfix, + int (*worker)(void *)) +{ + int errcode; + +#if defined(FEATURE_THREADS) + { + int thrd; + + for (thrd = 0; thrd < num_threads; ++thrd) + ptu_test(ptunit_thrd_create, &cfix->thrd, worker, cfix); + } +#endif /* defined(FEATURE_THREADS) */ + + errcode = worker(cfix); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} +int main(int argc, char **argv) +{ + struct iscache_fixture cfix, dfix; + struct ptunit_suite suite; + + cfix.init = cfix_init; + cfix.fini = cfix_fini; + + dfix.init = dfix_init; + dfix.fini = cfix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init_null); + ptu_run(suite, fini_null); + ptu_run(suite, name_null); + ptu_run(suite, add_null); + ptu_run(suite, find_null); + ptu_run(suite, lookup_null); + ptu_run(suite, clear_null); + ptu_run(suite, free_null); + ptu_run(suite, add_file_null); + ptu_run(suite, read_null); + + ptu_run_f(suite, name, dfix); + ptu_run_f(suite, name_none, dfix); + + ptu_run_f(suite, init_fini, cfix); + ptu_run_f(suite, add, cfix); + ptu_run_f(suite, add_no_name, cfix); + ptu_run_f(suite, add_file, cfix); + + ptu_run_f(suite, find, cfix); + ptu_run_f(suite, find_empty, cfix); + ptu_run_f(suite, find_bad_filename, cfix); + ptu_run_f(suite, find_null_filename, cfix); + ptu_run_f(suite, find_bad_offset, cfix); + ptu_run_f(suite, find_bad_size, cfix); + ptu_run_f(suite, find_bad_laddr, cfix); + + ptu_run_f(suite, lookup, cfix); + ptu_run_f(suite, lookup_bad_isid, cfix); + + ptu_run_f(suite, clear_empty, cfix); + ptu_run_f(suite, clear_find, cfix); + ptu_run_f(suite, clear_lookup, cfix); + + ptu_run_f(suite, add_twice, cfix); + ptu_run_f(suite, add_same, cfix); + ptu_run_f(suite, add_twice_different_laddr, cfix); + ptu_run_f(suite, add_same_different_laddr, cfix); + ptu_run_f(suite, add_different_same_laddr, cfix); + + ptu_run_f(suite, add_file_same, cfix); + ptu_run_f(suite, add_file_same_different_laddr, cfix); + ptu_run_f(suite, add_file_different_same_laddr, cfix); + + ptu_run_f(suite, read, cfix); + ptu_run_f(suite, read_truncate, cfix); + ptu_run_f(suite, read_bad_vaddr, cfix); + ptu_run_f(suite, read_bad_isid, cfix); + + ptu_run_fp(suite, stress, cfix, worker_add); + ptu_run_fp(suite, stress, cfix, worker_add_file); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-last_ip.c b/libipt/test/src/ptunit-last_ip.c new file mode 100644 index 0000000..9b708a4 --- /dev/null +++ b/libipt/test/src/ptunit-last_ip.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_last_ip.h" + +#include "intel-pt.h" + +#include <string.h> + + +static struct ptunit_result init(void) +{ + struct pt_last_ip last_ip; + + memset(&last_ip, 0xcd, sizeof(last_ip)); + + pt_last_ip_init(&last_ip); + + ptu_uint_eq(last_ip.ip, 0ull); + ptu_uint_eq(last_ip.have_ip, 0); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +static struct ptunit_result init_null(void) +{ + pt_last_ip_init(NULL); + + return ptu_passed(); +} + +static struct ptunit_result status_initial(void) +{ + struct pt_last_ip last_ip; + int errcode; + + pt_last_ip_init(&last_ip); + + errcode = pt_last_ip_query(NULL, &last_ip); + ptu_int_eq(errcode, -pte_noip); + + return ptu_passed(); +} + +static struct ptunit_result status(void) +{ + struct pt_last_ip last_ip; + int errcode; + + last_ip.have_ip = 1; + last_ip.suppressed = 0; + + errcode = pt_last_ip_query(NULL, &last_ip); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result status_null(void) +{ + int errcode; + + errcode = pt_last_ip_query(NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result status_noip(void) +{ + struct pt_last_ip last_ip; + int errcode; + + last_ip.have_ip = 0; + last_ip.suppressed = 0; + + errcode = pt_last_ip_query(NULL, &last_ip); + ptu_int_eq(errcode, -pte_noip); + + return ptu_passed(); +} + +static struct ptunit_result status_suppressed(void) +{ + struct pt_last_ip last_ip; + int errcode; + + last_ip.have_ip = 1; + last_ip.suppressed = 1; + + errcode = pt_last_ip_query(NULL, &last_ip); + ptu_int_eq(errcode, -pte_ip_suppressed); + + return ptu_passed(); +} + +static struct ptunit_result query_initial(void) +{ + struct pt_last_ip last_ip; + uint64_t ip; + int errcode; + + pt_last_ip_init(&last_ip); + + errcode = pt_last_ip_query(&ip, &last_ip); + ptu_int_eq(errcode, -pte_noip); + + return ptu_passed(); +} + +static struct ptunit_result query(void) +{ + struct pt_last_ip last_ip; + uint64_t ip, exp = 42ull; + int errcode; + + last_ip.ip = 42ull; + last_ip.have_ip = 1; + last_ip.suppressed = 0; + + errcode = pt_last_ip_query(&ip, &last_ip); + ptu_int_eq(errcode, 0); + ptu_uint_eq(last_ip.ip, exp); + + return ptu_passed(); +} + +static struct ptunit_result query_null(void) +{ + uint64_t ip = 13ull; + int
errcode; + + errcode = pt_last_ip_query(&ip, NULL); + ptu_int_eq(errcode, -pte_invalid); + ptu_uint_eq(ip, 13ull); + + return ptu_passed(); +} + +static struct ptunit_result query_noip(void) +{ + struct pt_last_ip last_ip; + uint64_t ip = 13ull; + int errcode; + + last_ip.ip = 42ull; + last_ip.have_ip = 0; + last_ip.suppressed = 0; + + errcode = pt_last_ip_query(&ip, &last_ip); + ptu_int_eq(errcode, -pte_noip); + ptu_uint_eq(ip, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result query_suppressed(void) +{ + struct pt_last_ip last_ip; + uint64_t ip = 13ull; + int errcode; + + last_ip.ip = 42ull; + last_ip.have_ip = 1; + last_ip.suppressed = 1; + + errcode = pt_last_ip_query(&ip, &last_ip); + ptu_int_eq(errcode, -pte_ip_suppressed); + ptu_uint_eq(ip, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_suppressed(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + struct pt_packet_ip packet; + int errcode; + + last_ip.ip = 42ull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + packet.ipc = pt_ipc_suppressed; + packet.ip = 13ull; + + errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(last_ip.ip, 42ull); + ptu_uint_eq(last_ip.have_ip, have_ip); + ptu_uint_eq(last_ip.suppressed, 1); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_upd16(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + struct pt_packet_ip packet; + int errcode; + + last_ip.ip = 0xff0042ull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + packet.ipc = pt_ipc_update_16; + packet.ip = 0xccc013ull; + + errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(last_ip.ip, 0xffc013ull); + ptu_uint_eq(last_ip.have_ip, 1); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_upd32(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + struct pt_packet_ip packet; + int errcode; + + last_ip.ip = 0xff00000420ull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + packet.ipc = pt_ipc_update_32; + packet.ip = 0xcc0000c013ull; + + errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(last_ip.ip, 0xff0000c013ull); + ptu_uint_eq(last_ip.have_ip, 1); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_sext48(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + struct pt_packet_ip packet; + int errcode; + + last_ip.ip = 0x7fffffffffffffffull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + packet.ipc = pt_ipc_sext_48; + packet.ip = 0xff00000000ffull; + + errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(last_ip.ip, 0xffffff00000000ffull); + ptu_uint_eq(last_ip.have_ip, 1); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_bad_packet(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + struct pt_packet_ip packet; + int errcode; + + last_ip.ip = 0x7fffffffffffffffull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + packet.ipc = (enum pt_ip_compression) 0xff; + packet.ip = 0ull; + + errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL); + ptu_int_eq(errcode, -pte_bad_packet); + ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull); + ptu_uint_eq(last_ip.have_ip, have_ip); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_null_ip(void) +{ + struct pt_packet_ip 
packet; + int errcode; + + errcode = pt_last_ip_update_ip(NULL, &packet, NULL); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result update_ip_null_packet(uint32_t have_ip) +{ + struct pt_last_ip last_ip; + int errcode; + + last_ip.ip = 0x7fffffffffffffffull; + last_ip.have_ip = have_ip; + last_ip.suppressed = 0; + + errcode = pt_last_ip_update_ip(&last_ip, NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull); + ptu_uint_eq(last_ip.have_ip, have_ip); + ptu_uint_eq(last_ip.suppressed, 0); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init); + ptu_run(suite, init_null); + ptu_run(suite, status_initial); + ptu_run(suite, status); + ptu_run(suite, status_null); + ptu_run(suite, status_noip); + ptu_run(suite, status_suppressed); + ptu_run(suite, query_initial); + ptu_run(suite, query); + ptu_run(suite, query_null); + ptu_run(suite, query_noip); + ptu_run(suite, query_suppressed); + ptu_run_p(suite, update_ip_suppressed, 0); + ptu_run_p(suite, update_ip_suppressed, 1); + ptu_run_p(suite, update_ip_upd16, 0); + ptu_run_p(suite, update_ip_upd16, 1); + ptu_run_p(suite, update_ip_upd32, 0); + ptu_run_p(suite, update_ip_upd32, 1); + ptu_run_p(suite, update_ip_sext48, 0); + ptu_run_p(suite, update_ip_sext48, 1); + ptu_run_p(suite, update_ip_bad_packet, 0); + ptu_run_p(suite, update_ip_bad_packet, 1); + ptu_run(suite, update_ip_null_ip); + ptu_run_p(suite, update_ip_null_packet, 0); + ptu_run_p(suite, update_ip_null_packet, 1); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-mapped_section.c b/libipt/test/src/ptunit-mapped_section.c new file mode 100644 index 0000000..8cadcd6 --- /dev/null +++ b/libipt/test/src/ptunit-mapped_section.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ptunit.h" + +#include "pt_mapped_section.h" + +#include "intel-pt.h" + + +uint64_t pt_section_size(const struct pt_section *section) +{ + if (!section) + return 0ull; + + return 0x1000ull; +} + +static struct ptunit_result begin(void) +{ + struct pt_mapped_section msec; + struct pt_section sec; + uint64_t begin; + + pt_msec_init(&msec, &sec, NULL, 0x2000ull); + + begin = pt_msec_begin(&msec); + ptu_uint_eq(begin, 0x2000); + + return ptu_passed(); +} + +static struct ptunit_result end(void) +{ + struct pt_mapped_section msec; + struct pt_section sec; + uint64_t begin; + + pt_msec_init(&msec, &sec, NULL, 0x2000ull); + + begin = pt_msec_end(&msec); + ptu_uint_eq(begin, 0x3000); + + return ptu_passed(); +} + +static struct ptunit_result end_bad(void) +{ + struct pt_mapped_section msec; + uint64_t end; + + pt_msec_init(&msec, NULL, NULL, 0x2000ull); + + end = pt_msec_end(&msec); + ptu_uint_eq(end, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result asid(void) +{ + struct pt_mapped_section msec; + struct pt_asid asid; + const struct pt_asid *pasid; + + pt_asid_init(&asid); + asid.cr3 = 0xa00000ull; + asid.vmcs = 0xb00000ull; + + pt_msec_init(&msec, NULL, &asid, 0x2000ull); + + pasid = pt_msec_asid(&msec); + ptu_ptr(pasid); + ptu_uint_eq(pasid->cr3, asid.cr3); + ptu_uint_eq(pasid->vmcs, asid.vmcs); + + return ptu_passed(); +} + +static struct ptunit_result asid_null(void) +{ + struct pt_mapped_section msec; + const struct pt_asid *pasid; + + pt_msec_init(&msec, NULL, NULL, 0x2000ull); + + pasid = pt_msec_asid(&msec); + ptu_ptr(pasid); + ptu_uint_eq(pasid->cr3, pt_asid_no_cr3); + ptu_uint_eq(pasid->vmcs, pt_asid_no_vmcs); + + return ptu_passed(); +} + +static struct ptunit_result map(void) +{ + struct pt_mapped_section msec; + uint64_t mapped; + + pt_msec_init(&msec, NULL, NULL, 0x2000ull); + + mapped = pt_msec_map(&msec, 0x1000); + ptu_uint_eq(mapped, 0x3000); + + return ptu_passed(); +} + +static struct ptunit_result unmap(void) +{ + struct pt_mapped_section msec; + uint64_t offset; + + pt_msec_init(&msec, NULL, NULL, 0x2000ull); + + offset = pt_msec_unmap(&msec, 0x3000); + ptu_uint_eq(offset, 0x1000); + + return ptu_passed(); +} + +static struct ptunit_result section(void) +{ + static struct pt_section section; + struct pt_mapped_section msec; + struct pt_section *psection; + + pt_msec_init(&msec, &section, NULL, 0x2000ull); + + psection = pt_msec_section(&msec); + ptu_ptr_eq(psection, &section); + + return ptu_passed(); +} + +static struct ptunit_result section_null(void) +{ + struct pt_mapped_section msec; + struct pt_section *psection; + + pt_msec_init(&msec, NULL, NULL, 0x2000ull); + + psection = pt_msec_section(&msec); + ptu_ptr_eq(psection, NULL); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, begin); + ptu_run(suite, end); + ptu_run(suite, end_bad); + ptu_run(suite, asid); + ptu_run(suite, asid_null); + ptu_run(suite, map); + ptu_run(suite, unmap); + ptu_run(suite, section); + ptu_run(suite, section_null); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-packet.c b/libipt/test/src/ptunit-packet.c new file mode 100644 index 0000000..9974901 --- /dev/null +++ b/libipt/test/src/ptunit-packet.c @@ -0,0 +1,741 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + 
* * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_packet_decoder.h" +#include "pt_encoder.h" + +#include "intel-pt.h" + +#include <string.h> + + +/* A test fixture providing everything needed for packet en- and de-coding. */ +struct packet_fixture { + /* The trace buffer. */ + uint8_t buffer[64]; + + /* Two packets for encoding[0] and decoding[1]. */ + struct pt_packet packet[2]; + + /* The configuration. */ + struct pt_config config; + + /* The encoder. */ + struct pt_encoder encoder; + + /* The decoder. */ + struct pt_packet_decoder decoder; + + /* The return value for an unknown decode. */ + int unknown; + + /* The test fixture initialization and finalization functions. 
*/ + struct ptunit_result (*init)(struct packet_fixture *); + struct ptunit_result (*fini)(struct packet_fixture *); +}; + +static int pfix_decode_unknown(struct pt_packet_unknown *packet, + const struct pt_config *config, + const uint8_t *pos, void *context) +{ + struct packet_fixture *pfix; + + if (!packet || !config) + return -pte_internal; + + pfix = (struct packet_fixture *) context; + if (!pfix) + return -pte_internal; + + if (config->begin != pfix->buffer) + return -pte_internal; + + if (config->end != pfix->buffer + sizeof(pfix->buffer)) + return -pte_internal; + + if (pos != pfix->buffer) + return -pte_internal; + + packet->priv = pfix; + + return pfix->unknown; +} + +static struct ptunit_result pfix_init(struct packet_fixture *pfix) +{ + int errcode; + + memset(pfix->buffer, 0, sizeof(pfix->buffer)); + memset(pfix->packet, 0, sizeof(pfix->packet)); + memset(&pfix->config, 0, sizeof(pfix->config)); + pfix->config.size = sizeof(pfix->config); + pfix->config.begin = pfix->buffer; + pfix->config.end = pfix->buffer + sizeof(pfix->buffer); + pfix->config.decode.callback = pfix_decode_unknown; + pfix->config.decode.context = pfix; + + pt_encoder_init(&pfix->encoder, &pfix->config); + pt_pkt_decoder_init(&pfix->decoder, &pfix->config); + + errcode = pt_pkt_sync_set(&pfix->decoder, 0x0ull); + ptu_int_eq(errcode, 0); + + pfix->unknown = 0; + + return ptu_passed(); +} + +static struct ptunit_result pfix_fini(struct packet_fixture *pfix) +{ + pt_encoder_fini(&pfix->encoder); + pt_pkt_decoder_fini(&pfix->decoder); + + return ptu_passed(); +} + +static struct ptunit_result ptu_pkt_eq(const struct pt_packet *enc, + const struct pt_packet *dec) +{ + const uint8_t *renc, *rdec; + size_t byte; + + ptu_ptr(enc); + ptu_ptr(dec); + + renc = (const uint8_t *) enc; + rdec = (const uint8_t *) dec; + + for (byte = 0; byte < sizeof(*enc); ++byte) + ptu_uint_eq(renc[byte], rdec[byte]); + + return ptu_passed(); +} + +static struct ptunit_result pfix_test(struct packet_fixture *pfix) +{ + int size; + + size = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_gt(size, 0); + + pfix->packet[0].size = (uint8_t) size; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_gt(size, 0); + + return ptu_pkt_eq(&pfix->packet[0], &pfix->packet[1]); +} + +static struct ptunit_result no_payload(struct packet_fixture *pfix, + enum pt_packet_type type) +{ + pfix->packet[0].type = type; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result unknown(struct packet_fixture *pfix, int exp) +{ + int size; + + pfix->buffer[0] = pt_opc_bad; + pfix->unknown = exp; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, pfix->unknown); + + if (size >= 0) { + ptu_int_eq(pfix->packet[1].type, ppt_unknown); + ptu_uint_eq(pfix->packet[1].size, (uint8_t) size); + ptu_ptr_eq(pfix->packet[1].payload.unknown.packet, + pfix->buffer); + ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix); + } + + return ptu_passed(); +} + +static struct ptunit_result unknown_ext(struct packet_fixture *pfix, int exp) +{ + int size; + + pfix->buffer[0] = pt_opc_ext; + pfix->buffer[1] = pt_ext_bad; + pfix->unknown = exp; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, pfix->unknown); + + if (size >= 0) { + ptu_int_eq(pfix->packet[1].type, ppt_unknown); + ptu_uint_eq(pfix->packet[1].size, (uint8_t) size); + ptu_ptr_eq(pfix->packet[1].payload.unknown.packet, + pfix->buffer); + 
ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix); + } + + return ptu_passed(); +} + +static struct ptunit_result unknown_ext2(struct packet_fixture *pfix, int exp) +{ + int size; + + pfix->buffer[0] = pt_opc_ext; + pfix->buffer[1] = pt_ext_ext2; + pfix->buffer[2] = pt_ext2_bad; + pfix->unknown = exp; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, exp); + + if (exp >= 0) { + ptu_int_eq(pfix->packet[1].type, ppt_unknown); + ptu_uint_eq(pfix->packet[1].size, (uint8_t) size); + ptu_ptr_eq(pfix->packet[1].payload.unknown.packet, + pfix->buffer); + ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix); + } + + return ptu_passed(); +} + +static struct ptunit_result tnt_8(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_tnt_8; + pfix->packet[0].payload.tnt.bit_size = 4; + pfix->packet[0].payload.tnt.payload = 0x5ull; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result tnt_64(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_tnt_64; + pfix->packet[0].payload.tnt.bit_size = 23; + pfix->packet[0].payload.tnt.payload = 0xabcdeull; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result ip(struct packet_fixture *pfix, + enum pt_packet_type type, + enum pt_ip_compression ipc, + uint64_t ip) +{ + pfix->packet[0].type = type; + pfix->packet[0].payload.ip.ipc = ipc; + pfix->packet[0].payload.ip.ip = ip; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result mode_exec(struct packet_fixture *pfix, + enum pt_exec_mode mode) +{ + struct pt_packet_mode_exec packet; + + packet = pt_set_exec_mode(mode); + + pfix->packet[0].type = ppt_mode; + pfix->packet[0].payload.mode.leaf = pt_mol_exec; + pfix->packet[0].payload.mode.bits.exec.csl = packet.csl; + pfix->packet[0].payload.mode.bits.exec.csd = packet.csd; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result mode_tsx(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_mode; + pfix->packet[0].payload.mode.leaf = pt_mol_tsx; + pfix->packet[0].payload.mode.bits.tsx.intx = 1; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result pip(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_pip; + pfix->packet[0].payload.pip.cr3 = 0x4200ull; + pfix->packet[0].payload.pip.nr = 1; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result tsc(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_tsc; + pfix->packet[0].payload.tsc.tsc = 0x42ull; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result cbr(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_cbr; + pfix->packet[0].payload.cbr.ratio = 0x23; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result tma(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_tma; + pfix->packet[0].payload.tma.ctc = 0x42; + pfix->packet[0].payload.tma.fc = 0x123; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result tma_bad(struct packet_fixture *pfix) +{ + int errcode; + + pfix->packet[0].type = ppt_tma; + pfix->packet[0].payload.tma.ctc = 0x42; + pfix->packet[0].payload.tma.fc = 0x200; + + errcode = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_eq(errcode, -pte_bad_packet); + + return ptu_passed(); +} + +static struct ptunit_result mtc(struct packet_fixture *pfix) +{ + pfix->packet[0].type = 
ppt_mtc; + pfix->packet[0].payload.mtc.ctc = 0x23; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result cyc(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_cyc; + pfix->packet[0].payload.cyc.value = 0x23; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result vmcs(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_vmcs; + pfix->packet[0].payload.vmcs.base = 0xabcdef000ull; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result mnt(struct packet_fixture *pfix) +{ + pfix->packet[0].type = ppt_mnt; + pfix->packet[0].payload.mnt.payload = 0x1234567890abcdefull; + + ptu_test(pfix_test, pfix); + + return ptu_passed(); +} + +static struct ptunit_result cutoff(struct packet_fixture *pfix, + enum pt_packet_type type) +{ + int size; + + pfix->packet[0].type = type; + + size = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_gt(size, 0); + + pfix->decoder.config.end = pfix->encoder.pos - 1; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result cutoff_ip(struct packet_fixture *pfix, + enum pt_packet_type type) +{ + int size; + + pfix->packet[0].type = type; + pfix->packet[0].payload.ip.ipc = pt_ipc_sext_48; + + size = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_gt(size, 0); + + pfix->decoder.config.end = pfix->encoder.pos - 1; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result cutoff_cyc(struct packet_fixture *pfix) +{ + int size; + + pfix->packet[0].type = ppt_cyc; + pfix->packet[0].payload.cyc.value = 0xa8; + + size = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_gt(size, 0); + + pfix->decoder.config.end = pfix->encoder.pos - 1; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result cutoff_mode(struct packet_fixture *pfix, + enum pt_mode_leaf leaf) +{ + int size; + + pfix->packet[0].type = ppt_mode; + pfix->packet[0].payload.mode.leaf = leaf; + + size = pt_enc_next(&pfix->encoder, &pfix->packet[0]); + ptu_int_gt(size, 0); + + pfix->decoder.config.end = pfix->encoder.pos - 1; + + size = pt_pkt_next(&pfix->decoder, &pfix->packet[1], + sizeof(pfix->packet[1])); + ptu_int_eq(size, -pte_eos); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct packet_fixture pfix; + struct ptunit_suite suite; + + pfix.init = pfix_init; + pfix.fini = pfix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run_fp(suite, no_payload, pfix, ppt_pad); + ptu_run_fp(suite, no_payload, pfix, ppt_psb); + ptu_run_fp(suite, no_payload, pfix, ppt_ovf); + ptu_run_fp(suite, no_payload, pfix, ppt_psbend); + ptu_run_fp(suite, no_payload, pfix, ppt_stop); + + ptu_run_fp(suite, unknown, pfix, 4); + ptu_run_fp(suite, unknown, pfix, -pte_nomem); + ptu_run_fp(suite, unknown_ext, pfix, 4); + ptu_run_fp(suite, unknown_ext, pfix, -pte_nomem); + ptu_run_fp(suite, unknown_ext2, pfix, 4); + ptu_run_fp(suite, unknown_ext2, pfix, -pte_nomem); + + ptu_run_f(suite, tnt_8, pfix); + ptu_run_f(suite, tnt_64, pfix); + + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_suppressed, 0x0ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_16, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_32, 
0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_48, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_sext_48, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_full, 0x42ull); + + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_suppressed, 0x0ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_16, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_32, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_48, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_sext_48, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_full, 0x42ull); + + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_suppressed, 0x0ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_16, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_32, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_48, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_sext_48, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_full, 0x42ull); + + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_suppressed, 0x0ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_16, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_32, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_48, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_sext_48, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_full, 0x42ull); + + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_suppressed, 0x0ull); + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_16, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_32, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_48, 0x4200ull); + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_sext_48, 0x42ull); + ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_full, 0x42ull); + + ptu_run_fp(suite, mode_exec, pfix, ptem_16bit); + ptu_run_fp(suite, mode_exec, pfix, ptem_32bit); + ptu_run_fp(suite, mode_exec, pfix, ptem_64bit); + ptu_run_f(suite, mode_tsx, pfix); + + ptu_run_f(suite, pip, pfix); + ptu_run_f(suite, tsc, pfix); + ptu_run_f(suite, cbr, pfix); + ptu_run_f(suite, tma, pfix); + ptu_run_f(suite, tma_bad, pfix); + ptu_run_f(suite, mtc, pfix); + ptu_run_f(suite, cyc, pfix); + ptu_run_f(suite, vmcs, pfix); + ptu_run_f(suite, mnt, pfix); + + ptu_run_fp(suite, cutoff, pfix, ppt_psb); + ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip); + ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pge); + ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pgd); + ptu_run_fp(suite, cutoff_ip, pfix, ppt_fup); + ptu_run_fp(suite, cutoff, pfix, ppt_ovf); + ptu_run_fp(suite, cutoff, pfix, ppt_psbend); + ptu_run_fp(suite, cutoff, pfix, ppt_tnt_64); + ptu_run_fp(suite, cutoff, pfix, ppt_tsc); + ptu_run_fp(suite, cutoff, pfix, ppt_cbr); + ptu_run_fp(suite, cutoff, pfix, ppt_tma); + ptu_run_fp(suite, cutoff, pfix, ppt_mtc); + ptu_run_f(suite, cutoff_cyc, pfix); + ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_exec); + ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_tsx); + ptu_run_fp(suite, cutoff, pfix, ppt_vmcs); + ptu_run_fp(suite, cutoff, pfix, ppt_mnt); + + ptunit_report(&suite); + return suite.nr_fails; +} + + +/* Dummy decode functions to satisfy link dependencies. + * + * As a nice side-effect, we will know if we need to add more tests when + * adding new decoder functions. 
+ */ +struct pt_query_decoder; + +int pt_qry_decode_unknown(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_pad(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_psb(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tnt_8(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tnt_64(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tip_pge(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tip_pgd(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_fup(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_fup(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_pip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_pip(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_ovf(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_mode(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_mode(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_psbend(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tsc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_tsc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_cbr(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_cbr(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_tma(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_mtc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_cyc(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_stop(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_vmcs(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_header_vmcs(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} +int pt_qry_decode_mnt(struct pt_query_decoder *d) +{ + (void) d; + + return -pte_internal; +} diff --git a/libipt/test/src/ptunit-query.c b/libipt/test/src/ptunit-query.c new file mode 100644 index 0000000..2f11cd3 --- /dev/null +++ b/libipt/test/src/ptunit-query.c @@ -0,0 +1,2846 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_last_ip.h" +#include "pt_decoder_function.h" +#include "pt_query_decoder.h" +#include "pt_encoder.h" + + +/* A query testing fixture. */ + +struct ptu_decoder_fixture { + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct ptu_decoder_fixture *); + struct ptunit_result (*fini)(struct ptu_decoder_fixture *); + + /* Encode an optional header for the test to read over. */ + struct ptunit_result (*header)(struct ptu_decoder_fixture *); + + /* The trace buffer. */ + uint8_t buffer[1024]; + + /* The configuration under test. */ + struct pt_config config; + + /* An encoder and query decoder for the above configuration. */ + struct pt_encoder encoder; + struct pt_query_decoder decoder; + + /* For tracking last-ip in tests. */ + struct pt_last_ip last_ip; +}; + +/* An invalid address. */ +static const uint64_t pt_dfix_bad_ip = (1ull << 62) - 1; + +/* A sign-extended address. */ +static const uint64_t pt_dfix_sext_ip = 0xffffff00ff00ff00ull; + +/* The highest possible address. */ +static const uint64_t pt_dfix_max_ip = (1ull << 47) - 1; + +/* The highest possible cr3 value. */ +static const uint64_t pt_dfix_max_cr3 = ((1ull << 47) - 1) & ~0x1f; + +/* Synchronize the decoder at the beginning of the trace stream, avoiding the + * initial PSB header. + */ +static struct ptunit_result ptu_sync_decoder(struct pt_query_decoder *decoder) +{ + ptu_ptr(decoder); + decoder->enabled = 1; + + (void) pt_df_fetch(&decoder->next, decoder->pos, &decoder->config); + return ptu_passed(); +} + +/* Cut off the last encoded packet. 
*/ +static struct ptunit_result cutoff(struct pt_query_decoder *decoder, + const struct pt_encoder *encoder) +{ + uint8_t *pos; + + ptu_ptr(decoder); + ptu_ptr(encoder); + + pos = encoder->pos; + ptu_ptr(pos); + + pos -= 1; + ptu_ptr_le(decoder->config.begin, pos); + + decoder->config.end = pos; + return ptu_passed(); +} + +static struct ptunit_result indir_not_synced(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + int errcode; + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_nosync); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result cond_not_synced(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + int errcode, tnt = 0xbc, taken = tnt; + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_nosync); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result event_not_synced(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_event event; + int errcode; + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_nosync); + + return ptu_passed(); +} + +static struct ptunit_result sync_backward(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t sync[3], offset, ip; + int errcode; + + /* Check that we can use repeated pt_qry_sync_backward() to iterate over + * synchronization points in backwards order. + */ + + errcode = pt_enc_get_offset(encoder, &sync[0]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[1]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[2]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + /* Synchronize repeatedly and check that we reach each PSB in the + * correct order. + */ + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[2]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[1]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[0]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +sync_backward_empty_end(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t sync[3], offset, ip; + int errcode; + + /* Check that we can use repeated pt_qry_sync_backward() to iterate over + * synchronization points in backwards order. + * + * There's an empty PSB+ at the end. We skip it. 
+ */ + + errcode = pt_enc_get_offset(encoder, &sync[0]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[1]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[2]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_psbend(encoder); + + /* Synchronize repeatedly and check that we reach each PSB in the + * correct order. + */ + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[1]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[0]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +sync_backward_empty_mid(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t sync[3], offset, ip; + int errcode; + + /* Check that we can use repeated pt_qry_sync_backward() to iterate over + * synchronization points in backwards order. + * + * There's an empty PSB+ in the middle. We skip it. + */ + + errcode = pt_enc_get_offset(encoder, &sync[0]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[1]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[2]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + /* Synchronize repeatedly and check that we reach each PSB in the + * correct order. + */ + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[2]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[0]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +sync_backward_empty_begin(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t sync[3], offset, ip; + int errcode; + + /* Check that we can use repeated pt_qry_sync_backward() to iterate over + * synchronization points in backwards order. + * + * There's an empty PSB+ at the beginning. We skip it. 
+ */ + + errcode = pt_enc_get_offset(encoder, &sync[0]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[1]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[2]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + /* Synchronize repeatedly and check that we reach each PSB in the + * correct order. + */ + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[2]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[1]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +decode_sync_backward(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + uint64_t sync[2], offset, ip; + int errcode; + + /* Check that we can use sync_backward to re-sync at the current trace + * segment as well as to find the previous trace segment. + */ + + errcode = pt_enc_get_offset(encoder, &sync[0]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + errcode = pt_enc_get_offset(encoder, &sync[1]); + ptu_int_ge(errcode, 0); + + pt_encode_psb(encoder); + pt_encode_mode_exec(encoder, ptem_64bit); + pt_encode_psbend(encoder); + + + errcode = pt_qry_sync_forward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[0]); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_ge(errcode, 0); + ptu_int_eq(event.type, ptev_exec_mode); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_ge(errcode, 0); + ptu_int_eq(event.type, ptev_exec_mode); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[1]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_ge(errcode, 0); + + errcode = pt_qry_get_sync_offset(decoder, &offset); + ptu_int_eq(errcode, 0); + ptu_uint_eq(offset, sync[0]); + + errcode = pt_qry_sync_backward(decoder, &ip); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result indir_null(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + int errcode; + + errcode = pt_qry_indirect_branch(NULL, &addr); + ptu_int_eq(errcode, -pte_invalid); + ptu_uint_eq(addr, ip); + + errcode = pt_qry_indirect_branch(decoder, NULL); + ptu_int_eq(errcode, -pte_invalid); + ptu_ptr_eq(decoder->pos, config->begin); + + return ptu_passed(); +} + +static struct ptunit_result indir_empty(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + int errcode; + + decoder->pos = 
config->end; + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_eos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result indir(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip packet; + uint64_t addr = pt_dfix_bad_ip; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_sext_ip; + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_tip(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + if (ipc == pt_ipc_suppressed) { + ptu_int_eq(errcode, pts_ip_suppressed | pts_eos); + ptu_uint_eq(addr, pt_dfix_bad_ip); + } else { + ptu_int_eq(errcode, pts_eos); + ptu_uint_eq(addr, dfix->last_ip.ip); + } + + return ptu_passed(); +} + +static struct ptunit_result indir_tnt(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip packet; + uint64_t addr = pt_dfix_bad_ip; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_sext_ip; + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_tnt_8(encoder, 0ull, 1); + pt_encode_tip(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + if (ipc == pt_ipc_suppressed) { + ptu_int_eq(errcode, pts_ip_suppressed); + ptu_uint_eq(addr, pt_dfix_bad_ip); + } else { + ptu_int_eq(errcode, 0); + ptu_uint_eq(addr, dfix->last_ip.ip); + } + + return ptu_passed(); +} + +static struct ptunit_result indir_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + int errcode; + + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_eos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result +indir_skip_tnt_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + int errcode; + + pt_encode_tnt_8(encoder, 0, 1); + pt_encode_tnt_8(encoder, 0, 1); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_bad_query); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result +indir_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result +indir_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = 
&dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result +indir_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + const uint8_t *pos; + int errcode; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result +indir_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t ip = pt_dfix_bad_ip, addr = ip; + const uint8_t *pos; + int errcode; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_uint_eq(addr, ip); + + return ptu_passed(); +} + +static struct ptunit_result cond_null(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + int errcode, tnt = 0xbc, taken = tnt; + + errcode = pt_qry_cond_branch(NULL, &taken); + ptu_int_eq(errcode, -pte_invalid); + ptu_int_eq(taken, tnt); + + errcode = pt_qry_cond_branch(decoder, NULL); + ptu_int_eq(errcode, -pte_invalid); + ptu_ptr_eq(decoder->pos, config->begin); + + return ptu_passed(); +} + +static struct ptunit_result cond_empty(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + int errcode, tnt = 0xbc, taken = tnt; + + decoder->pos = config->end; + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_eos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result cond(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + + pt_encode_tnt_8(encoder, 0x02, 3); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, 0); + ptu_int_eq(taken, 0); + + taken = tnt; + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, 0); + ptu_int_eq(taken, 1); + + taken = tnt; + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(taken, 0); + + taken = tnt; + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_eos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result cond_skip_tip_fail(struct ptu_decoder_fixture *dfix) +{ + struct 
pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + const uint8_t *pos; + + pos = encoder->pos; + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + pt_encode_tnt_8(encoder, 0, 1); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result +cond_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + const uint8_t *pos; + + pos = encoder->pos; + pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48); + pt_encode_tnt_8(encoder, 0, 1); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result +cond_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + const uint8_t *pos; + + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48); + pt_encode_tnt_8(encoder, 0, 1); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result +cond_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + const uint8_t *pos; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + pt_encode_tnt_8(encoder, 0, 1); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result +cond_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + int errcode, tnt = 0xbc, taken = tnt; + const uint8_t *pos; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48); + pt_encode_tnt_8(encoder, 0, 1); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_cond_branch(decoder, &taken); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + ptu_int_eq(taken, tnt); + + return ptu_passed(); +} + +static struct ptunit_result event_null(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + struct pt_event event; + int errcode; + + errcode = pt_qry_event(NULL, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_qry_event(decoder, NULL, sizeof(event)); + ptu_int_eq(errcode, -pte_invalid); + ptu_ptr_eq(decoder->pos, config->begin); + + return ptu_passed(); +} + +static struct ptunit_result event_bad_size(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_event event; + int errcode; + + errcode = 
pt_qry_event(decoder, &event, 4); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result event_small_size(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + union { + struct pt_event event; + uint8_t buffer[41]; + } variant; + int errcode; + + memset(variant.buffer, 0xcd, sizeof(variant.buffer)); + + pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &variant.event, 40); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(variant.event.type, ptev_enabled); + ptu_uint_eq(variant.buffer[40], 0xcd); + + return ptu_passed(); +} + +static struct ptunit_result event_big_size(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + union { + struct pt_event event; + uint8_t buffer[1024]; + } variant; + int errcode; + + memset(variant.buffer, 0xcd, sizeof(variant.buffer)); + + pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &variant.event, sizeof(variant.buffer)); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(variant.event.type, ptev_enabled); + ptu_uint_eq(variant.buffer[sizeof(variant.event)], 0xcd); + + return ptu_passed(); +} + +static struct ptunit_result event_empty(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_config *config = &decoder->config; + struct pt_event event; + int errcode; + + decoder->pos = config->end; + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result event_enabled(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip packet; + struct pt_event event; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_tip_pge(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + if (ipc == pt_ipc_suppressed) + ptu_int_eq(errcode, -pte_bad_packet); + else { + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_enabled); + ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + } + + return ptu_passed(); +} + +static struct ptunit_result +event_enabled_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result event_disabled(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip packet; + struct pt_event event; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_sext_ip; + 
pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_tip_pgd(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.disabled.ip, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_disabled); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_disabled_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_tip_pgd(encoder, 0, pt_ipc_update_32); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_async_disabled(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip fup, tip; + struct pt_event event; + int errcode; + + fup.ipc = pt_ipc_sext_48; + fup.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config); + + tip.ipc = ipc; + tip.ip = pt_dfix_sext_ip; + pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config); + + pt_encode_fup(encoder, fup.ip, fup.ipc); + pt_encode_tip_pgd(encoder, tip.ip, tip.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.async_disabled.ip, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_async_disabled); + ptu_uint_eq(event.variant.async_disabled.at, fup.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_async_disabled_suppressed_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_fup(encoder, 0, pt_ipc_suppressed); + pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_bad_packet); + + return ptu_passed(); +} + +static struct ptunit_result +event_async_disabled_cutoff_fail_a(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + uint64_t at = pt_dfix_sext_ip; + const uint8_t *pos; + int errcode; + + pt_encode_fup(encoder, at, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result 
+event_async_disabled_cutoff_fail_b(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result +event_async_branch_suppressed_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_fup(encoder, 0, pt_ipc_suppressed); + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_bad_packet); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result event_async_branch(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip fup, tip; + struct pt_event event; + int errcode; + + fup.ipc = pt_ipc_sext_48; + fup.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config); + + tip.ipc = ipc; + tip.ip = pt_dfix_sext_ip; + pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config); + + pt_encode_fup(encoder, fup.ip, fup.ipc); + pt_encode_tip(encoder, tip.ip, tip.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.async_branch.to, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_async_branch); + ptu_uint_eq(event.variant.async_branch.from, fup.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_async_branch_cutoff_fail_a(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + const uint8_t *pos; + int errcode; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pos = encoder->pos; + pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result +event_async_branch_cutoff_fail_b(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result event_paging(struct 
ptu_decoder_fixture *dfix, + uint8_t flags, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + uint64_t cr3 = pt_dfix_max_cr3; + int errcode; + + pt_encode_pip(encoder, cr3, flags); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_paging); + ptu_uint_eq(event.variant.paging.cr3, cr3); + ptu_uint_eq(event.variant.paging.non_root, (flags & pt_pl_pip_nr) != 0); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_paging_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_pip(encoder, 0, 0); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_async_paging(struct ptu_decoder_fixture *dfix, uint8_t flags, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + uint64_t to = pt_dfix_sext_ip, from = to & ~0xffffull; + uint64_t cr3 = pt_dfix_max_cr3; + int errcode; + + pt_encode_fup(encoder, from, pt_ipc_sext_48); + pt_encode_pip(encoder, cr3, flags); + pt_encode_tip(encoder, to, pt_ipc_update_16); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_event_pending); + ptu_int_eq(event.type, ptev_async_branch); + ptu_uint_eq(event.variant.async_branch.from, from); + ptu_uint_eq(event.variant.async_branch.to, to); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_async_paging); + ptu_uint_eq(event.variant.async_paging.cr3, cr3); + ptu_uint_eq(event.variant.async_paging.non_root, + (flags & pt_pl_pip_nr) != 0); + ptu_uint_eq(event.variant.async_paging.ip, to); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_async_paging_suppressed(struct ptu_decoder_fixture *dfix, uint8_t flags, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + uint64_t from = pt_dfix_sext_ip, cr3 = pt_dfix_max_cr3; + int errcode; + + pt_encode_fup(encoder, from, pt_ipc_sext_48); + pt_encode_pip(encoder, cr3, flags); + pt_encode_tip(encoder, 0, pt_ipc_suppressed); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_event_pending); + ptu_uint_ne(event.ip_suppressed, 0); + ptu_int_eq(event.type, ptev_async_branch); + ptu_uint_eq(event.variant.async_branch.from, from); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + 
ptu_uint_ne(event.ip_suppressed, 0); + ptu_int_eq(event.type, ptev_async_paging); + ptu_uint_eq(event.variant.async_paging.cr3, cr3); + ptu_uint_eq(event.variant.async_paging.non_root, + (flags & pt_pl_pip_nr) != 0); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_async_paging_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + pt_encode_pip(encoder, 0, 0); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result event_overflow_fup(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + struct pt_packet_ip packet; + int errcode; + + packet.ipc = ipc; + packet.ip = 0xccull; + + pt_last_ip_init(&dfix->last_ip); + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_ovf(encoder); + pt_encode_fup(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + switch (ipc) { + case pt_ipc_suppressed: + ptu_int_eq(errcode, -pte_bad_packet); + break; + + case pt_ipc_update_16: + case pt_ipc_update_32: + case pt_ipc_update_48: + case pt_ipc_sext_48: + case pt_ipc_full: + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_overflow); + ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + break; + } + + return ptu_passed(); +} + +static struct ptunit_result +event_overflow_fup_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_ovf(encoder); + pt_encode_fup(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_overflow_tip_pge(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + struct pt_packet_ip packet; + int errcode; + + packet.ipc = ipc; + packet.ip = 0xccull; + + pt_last_ip_init(&dfix->last_ip); + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_ovf(encoder); + pt_encode_tip_pge(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_event_pending); + ptu_int_eq(event.type, ptev_overflow); + ptu_uint_ne(event.ip_suppressed, 0); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + switch (ipc) { + case pt_ipc_suppressed: + ptu_int_eq(errcode, 
-pte_bad_packet); + break; + + case pt_ipc_update_16: + case pt_ipc_update_32: + case pt_ipc_update_48: + case pt_ipc_sext_48: + case pt_ipc_full: + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_enabled); + ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + break; + } + + return ptu_passed(); +} + +static struct ptunit_result +event_overflow_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_ovf(encoder); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result event_stop(struct ptu_decoder_fixture *dfix, + uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_stop(encoder); + + ptu_sync_decoder(decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_stop); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_exec_mode_tip(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + enum pt_exec_mode mode = ptem_16bit; + struct pt_packet_ip packet; + struct pt_event event; + uint64_t addr = 0ull; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_mode_exec(encoder, mode); + pt_encode_tip(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, 0); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_exec_mode); + ptu_int_eq(event.variant.exec_mode.mode, mode); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + errcode = pt_qry_indirect_branch(decoder, &addr); + if (ipc == pt_ipc_suppressed) + ptu_int_eq(errcode, pts_ip_suppressed | pts_eos); + else { + ptu_int_eq(errcode, pts_eos); + ptu_uint_eq(addr, dfix->last_ip.ip); + } + + return ptu_passed(); +} + +static struct ptunit_result +event_exec_mode_tip_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_mode_exec(encoder, ptem_32bit); + pt_encode_tip(encoder, 0, pt_ipc_update_16); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_exec_mode_tip_pge(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; 
+ struct pt_encoder *encoder = &dfix->encoder; + enum pt_exec_mode mode = ptem_16bit; + struct pt_packet_ip packet; + struct pt_event event; + uint64_t addr = 0ull; + int errcode; + + packet.ipc = ipc; + packet.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_mode_exec(encoder, mode); + pt_encode_tip_pge(encoder, packet.ip, packet.ipc); + + ptu_check(ptu_sync_decoder, decoder); + decoder->enabled = 0; + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + if (ipc == pt_ipc_suppressed) { + ptu_int_eq(errcode, -pte_bad_packet); + ptu_uint_eq(addr, 0ull); + } else { + ptu_int_eq(errcode, pts_event_pending); + ptu_int_eq(event.type, ptev_enabled); + ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_exec_mode); + ptu_int_eq(event.variant.exec_mode.mode, mode); + ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, tsc); + } + } + + return ptu_passed(); +} + +static struct ptunit_result +event_exec_mode_tip_pge_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_mode_exec(encoder, ptem_16bit); + pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_exec_mode_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_mode_exec(encoder, ptem_64bit); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result event_tsx_fup(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc, + uint8_t flags, uint64_t tsc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip fup, tip; + struct pt_event event; + uint64_t addr = 0; + int errcode; + + fup.ipc = ipc; + fup.ip = pt_dfix_max_ip; + pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config); + + tip.ipc = pt_ipc_sext_48; + tip.ip = pt_dfix_sext_ip; + + pt_encode_mode_tsx(encoder, flags); + pt_encode_fup(encoder, fup.ip, fup.ipc); + pt_encode_tip(encoder, tip.ip, tip.ipc); + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, 0); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_tsx); + ptu_int_eq(event.variant.tsx.speculative, + (flags & pt_mob_tsx_intx) != 0); + ptu_int_eq(event.variant.tsx.aborted, + (flags & pt_mob_tsx_abrt) != 0); + + if (!tsc) + ptu_int_eq(event.has_tsc, 0); + else { + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, 
tsc); + } + + errcode = pt_qry_indirect_branch(decoder, &addr); + ptu_int_eq(errcode, pts_eos); + ptu_uint_eq(addr, tip.ip); + + return ptu_passed(); +} + +static struct ptunit_result +event_tsx_fup_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_mode_tsx(encoder, 0); + pt_encode_fup(encoder, 0, pt_ipc_update_16); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_tsx_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_mode_tsx(encoder, 0); + + ptu_check(cutoff, decoder, encoder); + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +event_skip_tip_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + const uint8_t *pos; + int errcode; + + pos = encoder->pos; + pt_encode_tip(encoder, 0, pt_ipc_sext_48); + /* We omit the actual event - we don't get that far, anyway. */ + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_bad_query); + ptu_ptr_eq(decoder->pos, pos); + + return ptu_passed(); +} + +static struct ptunit_result +event_skip_tnt_8_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_tnt_8(encoder, 0, 1); + pt_encode_tnt_8(encoder, 0, 1); + /* We omit the actual event - we don't get that far, anyway. */ + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_bad_query); + /* The fail position depends on the fixture's header. */ + + return ptu_passed(); +} + +static struct ptunit_result +event_skip_tnt_64_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_event event; + int errcode; + + pt_encode_tnt_64(encoder, 0, 1); + pt_encode_tnt_64(encoder, 0, 1); + /* We omit the actual event - we don't get that far, anyway. */ + + ptu_check(ptu_sync_decoder, decoder); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, -pte_bad_query); + /* The fail position depends on the fixture's header. 
*/ + + return ptu_passed(); +} + +static struct ptunit_result sync_event(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip packet; + struct pt_event event; + uint64_t addr = 0ull; + int errcode; + + packet.ipc = ipc; + packet.ip = 0xccull; + + pt_last_ip_init(&dfix->last_ip); + pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config); + + pt_encode_psb(encoder); + pt_encode_mode_tsx(encoder, pt_mob_tsx_intx); + pt_encode_fup(encoder, packet.ip, packet.ipc); + pt_encode_psbend(encoder); + + errcode = pt_qry_sync_forward(decoder, &addr); + switch (ipc) { + case pt_ipc_suppressed: + ptu_int_eq(errcode, (pts_event_pending | pts_ip_suppressed)); + break; + + case pt_ipc_update_16: + case pt_ipc_update_32: + case pt_ipc_update_48: + case pt_ipc_sext_48: + case pt_ipc_full: + ptu_int_eq(errcode, pts_event_pending); + ptu_uint_eq(addr, dfix->last_ip.ip); + break; + } + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_eos); + ptu_uint_ne(event.status_update, 0); + if (ipc == pt_ipc_suppressed) + ptu_uint_ne(event.ip_suppressed, 0); + else { + ptu_uint_eq(event.ip_suppressed, 0); + ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip); + } + ptu_int_eq(event.type, ptev_tsx); + ptu_int_eq(event.variant.tsx.speculative, 1); + ptu_int_eq(event.variant.tsx.aborted, 0); + ptu_int_eq(event.has_tsc, 0); + + return ptu_passed(); +} + +static struct ptunit_result +sync_event_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t addr; + int errcode; + + pt_encode_psb(encoder); + pt_encode_psbend(encoder); + + ptu_check(cutoff, decoder, encoder); + + errcode = pt_qry_sync_forward(decoder, &addr); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result +sync_event_incomplete_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t addr; + int errcode; + + pt_encode_psb(encoder); + + errcode = pt_qry_sync_forward(decoder, &addr); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_ovf_event(struct ptu_decoder_fixture *dfix, + enum pt_ip_compression ipc) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + struct pt_packet_ip fup, ovf; + struct pt_event event; + uint64_t addr = 0; + int errcode; + + fup.ipc = pt_ipc_sext_48; + fup.ip = pt_dfix_max_ip; + + ovf.ipc = ipc; + ovf.ip = 0xccull; + + pt_last_ip_init(&dfix->last_ip); + pt_last_ip_update_ip(&dfix->last_ip, &ovf, &dfix->config); + + pt_encode_psb(encoder); + pt_encode_fup(encoder, fup.ip, fup.ipc); + pt_encode_mode_tsx(encoder, 0); + pt_encode_tsc(encoder, 0x1000); + pt_encode_ovf(encoder); + pt_encode_fup(encoder, ovf.ip, ovf.ipc); + + errcode = pt_qry_sync_forward(decoder, &addr); + ptu_int_eq(errcode, pts_event_pending); + ptu_uint_eq(addr, fup.ip); + + errcode = pt_qry_event(decoder, &event, sizeof(event)); + ptu_int_eq(errcode, pts_event_pending); + ptu_uint_ne(event.status_update, 0); + ptu_int_eq(event.type, ptev_tsx); + ptu_int_eq(event.variant.tsx.speculative, 0); + ptu_int_eq(event.variant.tsx.aborted, 0); + ptu_uint_eq(event.variant.tsx.ip, fup.ip); + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, 0x1000); + + errcode = 
pt_qry_event(decoder, &event, sizeof(event)); + switch (ipc) { + case pt_ipc_suppressed: + ptu_int_eq(errcode, -pte_bad_packet); + return ptu_passed(); + + case pt_ipc_update_16: + case pt_ipc_update_32: + case pt_ipc_update_48: + case pt_ipc_sext_48: + case pt_ipc_full: + ptu_int_eq(errcode, pts_eos); + ptu_int_eq(event.type, ptev_overflow); + ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip); + ptu_int_eq(event.has_tsc, 1); + ptu_uint_eq(event.tsc, 0x1000); + break; + } + + return ptu_passed(); +} + +static struct ptunit_result +sync_ovf_event_cutoff_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + uint64_t addr; + int errcode; + + pt_encode_psb(encoder); + pt_encode_ovf(encoder); + + ptu_check(cutoff, decoder, encoder); + + errcode = pt_qry_sync_forward(decoder, &addr); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result time_null_fail(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint64_t tsc; + int errcode; + + errcode = pt_qry_time(NULL, NULL, NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_qry_time(decoder, NULL, NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_qry_time(NULL, &tsc, NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result time_initial(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint64_t tsc; + int errcode; + + errcode = pt_qry_time(decoder, &tsc, NULL, NULL); + ptu_int_eq(errcode, -pte_no_time); + + return ptu_passed(); +} + +static struct ptunit_result time(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint64_t tsc, exp; + int errcode; + + exp = 0x11223344556677ull; + + decoder->time.have_tsc = 1; + decoder->time.tsc = exp; + + errcode = pt_qry_time(decoder, &tsc, NULL, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(tsc, exp); + + return ptu_passed(); +} + +static struct ptunit_result cbr_null(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint32_t cbr; + int errcode; + + errcode = pt_qry_core_bus_ratio(NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_qry_core_bus_ratio(decoder, NULL); + ptu_int_eq(errcode, -pte_invalid); + + errcode = pt_qry_core_bus_ratio(NULL, &cbr); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result cbr_initial(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint32_t cbr; + int errcode; + + errcode = pt_qry_core_bus_ratio(decoder, &cbr); + ptu_int_eq(errcode, -pte_no_cbr); + + return ptu_passed(); +} + +static struct ptunit_result cbr(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + uint32_t cbr; + int errcode; + + decoder->time.have_cbr = 1; + decoder->time.cbr = 42; + + errcode = pt_qry_core_bus_ratio(decoder, &cbr); + ptu_int_eq(errcode, 0); + ptu_uint_eq(cbr, 42); + + return ptu_passed(); +} + +static struct ptunit_result ptu_dfix_init(struct ptu_decoder_fixture *dfix) +{ + struct pt_config *config = &dfix->config; + int errcode; + + (void) memset(dfix->buffer, 0, sizeof(dfix->buffer)); + + pt_config_init(config); + + config->begin = dfix->buffer; + config->end = dfix->buffer + sizeof(dfix->buffer); + + errcode = pt_encoder_init(&dfix->encoder, config); + ptu_int_eq(errcode, 0); + + 
errcode = pt_qry_decoder_init(&dfix->decoder, config); + ptu_int_eq(errcode, 0); + + dfix->decoder.ip.ip = pt_dfix_bad_ip; + dfix->decoder.ip.have_ip = 1; + dfix->decoder.ip.suppressed = 0; + + dfix->last_ip = dfix->decoder.ip; + + if (dfix->header) + dfix->header(dfix); + + return ptu_passed(); +} + +static struct ptunit_result ptu_dfix_fini(struct ptu_decoder_fixture *dfix) +{ + pt_qry_decoder_fini(&dfix->decoder); + pt_encoder_fini(&dfix->encoder); + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of an empty buffer. */ +static struct ptunit_result +ptu_dfix_header_sync(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + + /* Synchronize the decoder at the beginning of the buffer. */ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of a buffer containing packets that + * should be skipped for unconditional indirect branch queries. + */ +static struct ptunit_result +ptu_dfix_header_indir(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + + pt_encode_pad(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0); + + /* Synchronize the decoder at the beginning of the buffer. */ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of a buffer containing packets that + * should be skipped for unconditional indirect branch queries including a PSB. + */ +static struct ptunit_result +ptu_dfix_header_indir_psb(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + + /* The psb must be empty since the tests won't skip status events. + * On the other hand, we do need to provide an address since tests + * may want to update last-ip, which requires a last-ip, of course. + */ + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0); + pt_encode_psb(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0); + pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48); + pt_encode_mnt(encoder, 0ull); + pt_encode_psbend(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + pt_encode_mnt(encoder, 0ull); + + /* Synchronize the decoder at the beginning of the buffer. */ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of a buffer containing packets that + * should be skipped for conditional branch queries. + */ +static struct ptunit_result +ptu_dfix_header_cond(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + + /* The psb must be empty since the tests won't skip status events. + * On the other hand, we do need to provide an address since tests + * may want to update last-ip, which requires a last-ip, of course. + */ + pt_encode_pad(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_psb(encoder); + pt_encode_tsc(encoder, 0); + pt_encode_pad(encoder); + pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48); + pt_encode_mnt(encoder, 0ull); + pt_encode_psbend(encoder); + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0); + pt_encode_pad(encoder); + pt_encode_mnt(encoder, 0ull); + + /* Synchronize the decoder at the beginning of the buffer.
*/ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of a buffer containing packets that + * should be skipped for event queries. + */ +static struct ptunit_result +ptu_dfix_header_event(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + + pt_encode_pad(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0x1000); + + /* Synchronize the decoder at the beginning of the buffer. */ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +/* Synchronize the decoder at the beginning of a buffer containing packets that + * should be skipped for event queries including a PSB. + */ +static struct ptunit_result +ptu_dfix_header_event_psb(struct ptu_decoder_fixture *dfix) +{ + struct pt_query_decoder *decoder = &dfix->decoder; + struct pt_encoder *encoder = &dfix->encoder; + + /* The psb must be empty since the tests won't skip status events. + * On the other hand, we do need to provide an address since tests + * may want to update last-ip, which requires a last-ip, of course. + */ + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0); + pt_encode_psb(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + pt_encode_tsc(encoder, 0x1000); + pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48); + pt_encode_psbend(encoder); + pt_encode_cbr(encoder, 1); + pt_encode_pad(encoder); + + /* Synchronize the decoder at the beginning of the buffer. */ + decoder->pos = decoder->config.begin; + + return ptu_passed(); +} + +static struct ptu_decoder_fixture dfix_raw; +static struct ptu_decoder_fixture dfix_empty; +static struct ptu_decoder_fixture dfix_indir; +static struct ptu_decoder_fixture dfix_indir_psb; +static struct ptu_decoder_fixture dfix_cond; +static struct ptu_decoder_fixture dfix_event; +static struct ptu_decoder_fixture dfix_event_psb; + +static void init_fixtures(void) +{ + dfix_raw.init = ptu_dfix_init; + dfix_raw.fini = ptu_dfix_fini; + + dfix_empty = dfix_raw; + dfix_empty.header = ptu_dfix_header_sync; + + dfix_indir = dfix_raw; + dfix_indir.header = ptu_dfix_header_indir; + + dfix_indir_psb = dfix_raw; + dfix_indir_psb.header = ptu_dfix_header_indir_psb; + + dfix_cond = dfix_raw; + dfix_cond.header = ptu_dfix_header_cond; + + dfix_event = dfix_raw; + dfix_event.header = ptu_dfix_header_event; + + dfix_event_psb = dfix_raw; + dfix_event_psb.header = ptu_dfix_header_event_psb; +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + init_fixtures(); + + suite = ptunit_mk_suite(argc, argv); + + ptu_run_f(suite, indir_not_synced, dfix_raw); + ptu_run_f(suite, cond_not_synced, dfix_raw); + ptu_run_f(suite, event_not_synced, dfix_raw); + + ptu_run_f(suite, sync_backward, dfix_raw); + ptu_run_f(suite, sync_backward_empty_end, dfix_raw); + ptu_run_f(suite, sync_backward_empty_mid, dfix_raw); + ptu_run_f(suite, sync_backward_empty_begin, dfix_raw); + ptu_run_f(suite, decode_sync_backward, dfix_raw); + + ptu_run_f(suite, indir_null, dfix_empty); + ptu_run_f(suite, indir_empty, dfix_empty); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_suppressed); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_16); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_32); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_48); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_sext_48); + ptu_run_fp(suite, indir, dfix_empty, pt_ipc_full); + ptu_run_fp(suite, indir_tnt, dfix_empty,
pt_ipc_suppressed); + ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_16); + ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_32); + ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_48); + ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_sext_48); + ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_full); + ptu_run_f(suite, indir_cutoff_fail, dfix_empty); + ptu_run_f(suite, indir_skip_tnt_fail, dfix_empty); + ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_empty); + ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_empty); + ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_empty); + ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_empty); + + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_suppressed); + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_16); + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_32); + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_48); + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_sext_48); + ptu_run_fp(suite, indir, dfix_indir, pt_ipc_full); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_suppressed); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_16); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_32); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_48); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_sext_48); + ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_full); + ptu_run_f(suite, indir_cutoff_fail, dfix_indir); + ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir); + ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir); + ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir); + ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir); + ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir); + + ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_suppressed); + ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_sext_48); + ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_full); + ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_suppressed); + ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_sext_48); + ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_full); + ptu_run_f(suite, indir_cutoff_fail, dfix_indir_psb); + ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir_psb); + ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir_psb); + ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir_psb); + ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir_psb); + ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir_psb); + + ptu_run_f(suite, cond_null, dfix_empty); + ptu_run_f(suite, cond_empty, dfix_empty); + ptu_run_f(suite, cond, dfix_empty); + ptu_run_f(suite, cond_skip_tip_fail, dfix_empty); + ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_empty); + ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_empty); + ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_empty); + ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_empty); + + ptu_run_f(suite, cond, dfix_cond); + ptu_run_f(suite, cond_skip_tip_fail, dfix_cond); + ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_cond); + ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_cond); + ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_cond); + ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_cond); + + ptu_run_f(suite, event_null, dfix_empty); + ptu_run_f(suite, event_bad_size, dfix_empty); + ptu_run_f(suite, event_small_size, dfix_empty); + ptu_run_f(suite, event_big_size, dfix_empty); + ptu_run_f(suite, event_empty, dfix_empty); + ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_16, 0); + 
ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_32, 0); + ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_48, 0); + ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_enabled_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_16, 0); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_32, 0); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_48, 0); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_disabled_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_suppressed, + 0); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_16, + 0); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_32, + 0); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_48, + 0); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_empty); + ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_empty); + ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_empty); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_16, 0); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_32, 0); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_48, 0); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_empty); + ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_empty); + ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_empty); + ptu_run_fp(suite, event_paging, dfix_empty, 0, 0); + ptu_run_fp(suite, event_paging, dfix_empty, pt_pl_pip_nr, 0); + ptu_run_f(suite, event_paging_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_async_paging, dfix_empty, 0, 0); + ptu_run_fp(suite, event_async_paging, dfix_empty, pt_pl_pip_nr, 0); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty, 0, 0); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty, + pt_pl_pip_nr, 0); + ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_16, 0); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_32, 0); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_48, 0); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_overflow_fup_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, + pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_16, + 0); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_32, + 0); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_48, + 0); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_sext_48, + 0); + ptu_run_fp(suite, 
event_overflow_tip_pge, dfix_empty, pt_ipc_full, + 0); + ptu_run_f(suite, event_overflow_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_stop, dfix_empty, 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_suppressed, + 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_16, 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_32, 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_48, 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_sext_48, 0); + ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_full, 0); + ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, + pt_ipc_suppressed, 0); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, + pt_ipc_update_16, 0); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, + pt_ipc_update_32, 0); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, + pt_ipc_update_48, 0); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_sext_48, + 0); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_full, + 0); + ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_empty); + ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_empty); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_suppressed, + pt_mob_tsx_intx, 0); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_16, 0, 0); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_32, + pt_mob_tsx_intx, 0); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_48, + pt_mob_tsx_intx, 0); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_sext_48, 0, 0); + ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_full, 0, 0); + ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_empty); + ptu_run_f(suite, event_tsx_cutoff_fail, dfix_empty); + ptu_run_f(suite, event_skip_tip_fail, dfix_empty); + ptu_run_f(suite, event_skip_tnt_8_fail, dfix_empty); + ptu_run_f(suite, event_skip_tnt_64_fail, dfix_empty); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_suppressed); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_16); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_32); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_48); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_sext_48); + ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_full); + ptu_run_f(suite, sync_event_cutoff_fail, dfix_empty); + ptu_run_f(suite, sync_event_incomplete_fail, dfix_empty); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_suppressed); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_16); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_32); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_48); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_sext_48); + ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_full); + ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_empty); + + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_16, 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_32, 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_48, 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_sext_48, 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_full, 0x1000); + ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_suppressed, + 
0x1000); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_16, 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_32, 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_48, 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_sext_48, 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_full, 0x1000); + ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event); + ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event); + ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event); + ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event); + ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event); + ptu_run_fp(suite, event_paging, dfix_event, 0, 0x1000); + ptu_run_fp(suite, event_paging, dfix_event, pt_pl_pip_nr, 0x1000); + ptu_run_f(suite, event_paging_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_async_paging, dfix_event, 0, 0x1000); + ptu_run_fp(suite, event_async_paging, dfix_event, pt_pl_pip_nr, 0x1000); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_event, 0, 0x1000); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_event, + pt_pl_pip_nr, 0x1000); + ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_overflow_fup_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, + pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_stop, dfix_event, 0x1000); + ptu_run_fp(suite, 
event_exec_mode_tip, dfix_event, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, + pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, + pt_ipc_update_16, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, + pt_ipc_update_32, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, + pt_ipc_update_48, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event); + ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_suppressed, 0, + 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_16, + pt_mob_tsx_intx, 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_32, 0, + 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_48, 0, + 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_sext_48, + pt_mob_tsx_intx, 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_full, + pt_mob_tsx_intx, 0x1000); + ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event); + ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event); + ptu_run_f(suite, event_skip_tip_fail, dfix_event); + ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event); + ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_suppressed); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_16); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_32); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_48); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_sext_48); + ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_full); + ptu_run_f(suite, sync_event_cutoff_fail, dfix_event); + ptu_run_f(suite, sync_event_incomplete_fail, dfix_event); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_suppressed); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_16); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_32); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_48); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_sext_48); + ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_full); + ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_event); + + ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_suppressed, + 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_async_disabled, 
dfix_event_psb, + pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event_psb, + pt_ipc_update_16, 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event_psb, + pt_ipc_update_32, 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event_psb, + pt_ipc_update_48, 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event_psb, + pt_ipc_sext_48, 0x1000); + ptu_run_fp(suite, event_async_disabled, dfix_event_psb, + pt_ipc_full, 0x1000); + ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event_psb); + ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event_psb); + ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event_psb); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, + pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_16, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_32, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_48, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event_psb); + ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event_psb); + ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event_psb); + ptu_run_fp(suite, event_paging, dfix_event_psb, 0, 0x1000); + ptu_run_fp(suite, event_paging, dfix_event_psb, pt_pl_pip_nr, 0x1000); + ptu_run_f(suite, event_paging_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_async_paging, dfix_event_psb, 0, 0x1000); + ptu_run_fp(suite, event_async_paging, dfix_event_psb, pt_pl_pip_nr, + 0x1000); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb, 0, + 0x1000); + ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb, + pt_pl_pip_nr, 0x1000); + ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event_psb); + ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_stop, dfix_event_psb, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, + pt_ipc_suppressed, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_sext_48, + 0x1000); + ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_full, + 0x1000); + ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb, + pt_ipc_sext_48, 0x1000); + ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb, + pt_ipc_full, 0x1000); + ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event_psb); + ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event_psb); + ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_suppressed, 0, + 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_sext_48, + pt_mob_tsx_intx, 0x1000); + ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_full, + pt_mob_tsx_intx, 0x1000); + ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event_psb); + ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event_psb); + ptu_run_f(suite, event_skip_tip_fail, dfix_event_psb); + ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event_psb); + ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event_psb); + + ptu_run_f(suite, time_null_fail, dfix_empty); + ptu_run_f(suite, time_initial, dfix_empty); + ptu_run_f(suite, time, dfix_empty); + + ptu_run_f(suite, cbr_null, dfix_empty); + ptu_run_f(suite, cbr_initial, dfix_empty); + 
ptu_run_f(suite, cbr, dfix_empty); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-retstack.c b/libipt/test/src/ptunit-retstack.c new file mode 100644 index 0000000..364fde8 --- /dev/null +++ b/libipt/test/src/ptunit-retstack.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ptunit.h" + +#include "pt_retstack.h" + +#include "intel-pt.h" + + +static struct ptunit_result init(void) +{ + struct pt_retstack retstack; + int status; + + memset(&retstack, 0xcd, sizeof(retstack)); + + pt_retstack_init(&retstack); + + status = pt_retstack_is_empty(&retstack); + ptu_int_ne(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result init_null(void) +{ + pt_retstack_init(NULL); + + return ptu_passed(); +} + +static struct ptunit_result query(void) +{ + struct pt_retstack retstack; + uint64_t ip; + int status; + + pt_retstack_init(&retstack); + + status = pt_retstack_push(&retstack, 0x42ull); + ptu_int_eq(status, 0); + + status = pt_retstack_is_empty(&retstack); + ptu_int_eq(status, 0); + + status = pt_retstack_pop(&retstack, &ip); + ptu_int_eq(status, 0); + ptu_uint_eq(ip, 0x42ull); + + status = pt_retstack_is_empty(&retstack); + ptu_int_ne(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result query_empty(void) +{ + struct pt_retstack retstack; + uint64_t ip; + int status; + + pt_retstack_init(&retstack); + + ip = 0x42ull; + status = pt_retstack_pop(&retstack, &ip); + ptu_int_eq(status, -pte_retstack_empty); + ptu_uint_eq(ip, 0x42ull); + + return ptu_passed(); +} + +static struct ptunit_result query_null(void) +{ + uint64_t ip; + int status; + + ip = 0x42ull; + status = pt_retstack_pop(NULL, &ip); + ptu_int_eq(status, -pte_invalid); + ptu_uint_eq(ip, 0x42ull); + + return ptu_passed(); +} + +static struct ptunit_result pop(void) +{ + struct pt_retstack retstack; + int status; + + pt_retstack_init(&retstack); + + status = pt_retstack_push(&retstack, 0x42ull); + ptu_int_eq(status, 0); + + status = pt_retstack_is_empty(&retstack); + ptu_int_eq(status, 0); + + status = pt_retstack_pop(&retstack, NULL); + ptu_int_eq(status, 0); + + status = pt_retstack_is_empty(&retstack); + ptu_int_ne(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result pop_empty(void) +{ + struct pt_retstack retstack; + int status; + + pt_retstack_init(&retstack); + + status = pt_retstack_pop(&retstack, NULL); + ptu_int_eq(status, -pte_retstack_empty); + + return ptu_passed(); +} + +static struct ptunit_result pop_null(void) +{ + int status; + + status = pt_retstack_pop(NULL, NULL); + ptu_int_eq(status, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result full(void) +{ + struct pt_retstack retstack; + uint64_t ip, idx; + int status; + + pt_retstack_init(&retstack); + + for (idx = 0; idx < pt_retstack_size; ++idx) { + status = pt_retstack_push(&retstack, idx); + ptu_int_eq(status, 0); + } + + status = pt_retstack_is_empty(&retstack); + ptu_int_eq(status, 0); + + for (idx = pt_retstack_size; idx > 0;) { + idx -= 1; + + status = pt_retstack_pop(&retstack, &ip); + ptu_int_eq(status, 0); + ptu_uint_eq(ip, idx); + } + + status = pt_retstack_is_empty(&retstack); + ptu_int_ne(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result overflow(void) +{ + struct pt_retstack retstack; + uint64_t ip, idx; + int status; + + pt_retstack_init(&retstack); + + for (idx = 0; idx <= pt_retstack_size; ++idx) { + status = pt_retstack_push(&retstack, idx); + ptu_int_eq(status, 0); + } + + status = pt_retstack_is_empty(&retstack); + ptu_int_eq(status, 0); + + for (idx = pt_retstack_size; idx > 0; --idx) { + status = pt_retstack_pop(&retstack, &ip); + ptu_int_eq(status, 0); + ptu_uint_eq(ip, idx); + } + + status = pt_retstack_is_empty(&retstack); + ptu_int_ne(status, 0); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + 
struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init); + ptu_run(suite, init_null); + ptu_run(suite, query); + ptu_run(suite, query_empty); + ptu_run(suite, query_null); + ptu_run(suite, pop); + ptu_run(suite, pop_empty); + ptu_run(suite, pop_null); + ptu_run(suite, full); + ptu_run(suite, overflow); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-section-file.c b/libipt/test/src/ptunit-section-file.c new file mode 100644 index 0000000..b7ba20f --- /dev/null +++ b/libipt/test/src/ptunit-section-file.c @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_section.h" +#include "pt_section_file.h" + +#include "intel-pt.h" + +#include +#include + + +/* This is a variation of ptunit-section.c. + * + * We provide pt_section_map() et.al. that are normally provided by mmap-based + * section implementations. Our implementation falls back to file-based + * sections so we're able to test them. + * + * The actual test is in ptunit-section.c. + */ + +/* The file status used for detecting changes to a file between unmap and map. + * + * In our case, the changes always affect the size of the file. + */ +struct pt_file_status { + /* The size in bytes. 
*/ + long size; +}; + +int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename) +{ + struct pt_file_status *status; + FILE *file; + long size; + int errcode; + + if (!pstatus || !psize) + return -pte_internal; + + file = fopen(filename, "rb"); + if (!file) + return -pte_bad_image; + + errcode = fseek(file, 0, SEEK_END); + if (errcode) { + errcode = -pte_bad_image; + goto out_file; + } + + size = ftell(file); + if (size < 0) { + errcode = -pte_bad_image; + goto out_file; + } + + status = malloc(sizeof(*status)); + if (!status) { + errcode = -pte_nomem; + goto out_file; + } + + status->size = size; + + *pstatus = status; + *psize = (uint64_t) size; + + errcode = 0; + +out_file: + fclose(file); + return errcode; +} + +int pt_section_map(struct pt_section *section) +{ + struct pt_file_status *status; + const char *filename; + uint16_t mcount; + FILE *file; + long size; + int errcode; + + if (!section) + return -pte_internal; + + errcode = pt_section_lock(section); + if (errcode < 0) + return errcode; + + mcount = section->mcount + 1; + if (mcount > 1) { + section->mcount = mcount; + return pt_section_unlock(section); + } + + errcode = -pte_internal; + if (!mcount) + goto out_unlock; + + if (section->mapping) + goto out_unlock; + + filename = section->filename; + if (!filename) + goto out_unlock; + + status = section->status; + if (!status) + goto out_unlock; + + errcode = -pte_bad_image; + file = fopen(filename, "rb"); + if (!file) + goto out_unlock; + + errcode = fseek(file, 0, SEEK_END); + if (errcode) { + errcode = -pte_bad_image; + goto out_file; + } + + errcode = -pte_bad_image; + size = ftell(file); + if (size < 0) + goto out_file; + + if (size != status->size) + goto out_file; + + /* We need to keep the file open on success. It will be closed when + * the section is unmapped. + */ + errcode = pt_sec_file_map(section, file); + if (!errcode) { + section->mcount = 1; + return pt_section_unlock(section); + } + +out_file: + fclose(file); + +out_unlock: + (void) pt_section_unlock(section); + return errcode; +} diff --git a/libipt/test/src/ptunit-section.c b/libipt/test/src/ptunit-section.c new file mode 100644 index 0000000..dfb1c07 --- /dev/null +++ b/libipt/test/src/ptunit-section.c @@ -0,0 +1,1029 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
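For illustration only (not part of the patch): the file-based test implementation above determines and re-checks file sizes with fopen()/fseek()/ftell(). A minimal, self-contained sketch of that probe, assuming a long is wide enough for the test files used here:

#include <stdio.h>

/* Probe the size of @filename in bytes.
 *
 * Returns the size on success, a negative value if the file cannot be
 * opened or its size cannot be determined.
 */
static long probe_file_size(const char *filename)
{
	FILE *file;
	long size;

	file = fopen(filename, "rb");
	if (!file)
		return -1L;

	if (fseek(file, 0, SEEK_END) != 0) {
		fclose(file);
		return -1L;
	}

	size = ftell(file);
	fclose(file);

	return size;
}

The pt_section_map() variant above repeats this probe whenever the section is mapped for the first time and fails with -pte_bad_image if the size no longer matches the size recorded by pt_section_mk_status(), which is what the map_change test in ptunit-section.c below exercises.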
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit_threads.h" +#include "ptunit_mkfile.h" + +#include "pt_section.h" +#include "pt_block_cache.h" + +#include "intel-pt.h" + +#include +#include + + +struct pt_block_cache *pt_bcache_alloc(uint64_t nentries) +{ + struct pt_block_cache *bcache; + + if (!nentries || (UINT32_MAX < nentries)) + return NULL; + + /* The cache is not really used by tests. It suffices to allocate only + * the cache struct with the single default entry. + * + * We still set the number of entries to the requested size. + */ + bcache = malloc(sizeof(*bcache)); + if (bcache) + bcache->nentries = (uint32_t) nentries; + + return bcache; +} + +void pt_bcache_free(struct pt_block_cache *bcache) +{ + free(bcache); +} + +/* A test fixture providing a temporary file and an initially NULL section. */ +struct section_fixture { + /* Threading support. */ + struct ptunit_thrd_fixture thrd; + + /* A temporary file name. */ + char *name; + + /* That file opened for writing. */ + FILE *file; + + /* The section. */ + struct pt_section *section; + + /* A cloned section. */ + struct pt_section *clone; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct section_fixture *); + struct ptunit_result (*fini)(struct section_fixture *); +}; + +enum { +#if defined(FEATURE_THREADS) + + num_threads = 4, + +#endif /* defined(FEATURE_THREADS) */ + + num_work = 0x4000 +}; + +static struct ptunit_result sfix_write_aux(struct section_fixture *sfix, + const uint8_t *buffer, size_t size) +{ + size_t written; + + written = fwrite(buffer, 1, size, sfix->file); + ptu_uint_eq(written, size); + + fflush(sfix->file); + + return ptu_passed(); +} + +#define sfix_write(sfix, buffer) \ + ptu_check(sfix_write_aux, sfix, buffer, sizeof(buffer)) + +static struct ptunit_result create(struct section_fixture *sfix) +{ + const char *name; + uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc }; + uint64_t offset, size; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + name = pt_section_filename(sfix->section); + ptu_str_eq(name, sfix->name); + + offset = pt_section_offset(sfix->section); + ptu_uint_eq(offset, 0x1ull); + + size = pt_section_size(sfix->section); + ptu_uint_eq(size, 0x3ull); + + return ptu_passed(); +} + +static struct ptunit_result create_bad_offset(struct section_fixture *sfix) +{ + sfix->section = pt_mk_section(sfix->name, 0x10ull, 0x0ull); + ptu_null(sfix->section); + + return ptu_passed(); +} + +static struct ptunit_result create_truncated(struct section_fixture *sfix) +{ + const char *name; + uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc }; + uint64_t offset, size; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, UINT64_MAX); + ptu_ptr(sfix->section); + + name = pt_section_filename(sfix->section); + ptu_str_eq(name, sfix->name); + + offset = pt_section_offset(sfix->section); + ptu_uint_eq(offset, 0x1ull); + + size = 
pt_section_size(sfix->section); + ptu_uint_eq(size, sizeof(bytes) - 1); + + return ptu_passed(); +} + +static struct ptunit_result create_empty(struct section_fixture *sfix) +{ + sfix->section = pt_mk_section(sfix->name, 0x0ull, 0x10ull); + ptu_null(sfix->section); + + return ptu_passed(); +} + +static struct ptunit_result clone_null(void) +{ + struct pt_section *section; + int errcode; + + section = NULL; + + errcode = pt_section_clone(NULL, section, 0ull, 1ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_section_clone(&section, NULL, 0ull, 1ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result filename_null(void) +{ + const char *name; + + name = pt_section_filename(NULL); + ptu_null(name); + + return ptu_passed(); +} + +static struct ptunit_result size_null(void) +{ + uint64_t size; + + size = pt_section_size(NULL); + ptu_uint_eq(size, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result offset_null(void) +{ + uint64_t offset; + + offset = pt_section_offset(NULL); + ptu_uint_eq(offset, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result get_null(void) +{ + int errcode; + + errcode = pt_section_get(NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result put_null(void) +{ + int errcode; + + errcode = pt_section_put(NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result map_null(void) +{ + int errcode; + + errcode = pt_section_map(NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result unmap_null(void) +{ + int errcode; + + errcode = pt_section_unmap(NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result cache_null(void) +{ + struct pt_block_cache *bcache; + + bcache = pt_section_bcache(NULL); + ptu_null(bcache); + + return ptu_passed(); +} + +static struct ptunit_result get_overflow(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + sfix->section->ucount = UINT16_MAX; + + errcode = pt_section_get(sfix->section); + ptu_int_eq(errcode, -pte_internal); + + sfix->section->ucount = 1; + + return ptu_passed(); +} + +static struct ptunit_result map_change(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + sfix_write(sfix, bytes); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, -pte_bad_image); + + return ptu_passed(); +} + +static struct ptunit_result map_put(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_put(sfix->section); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result unmap_nomap(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + errcode =
pt_section_unmap(sfix->section); + ptu_int_eq(errcode, -pte_nomap); + + return ptu_passed(); +} + +static struct ptunit_result map_overflow(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + sfix->section->mcount = UINT16_MAX; + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, -pte_internal); + + sfix->section->mcount = 0; + + return ptu_passed(); +} + +static struct ptunit_result get_put(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + errcode = pt_section_get(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_get(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_put(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_put(sfix->section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result map_unmap(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result read(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x0ull); + ptu_int_eq(status, 2); + ptu_uint_eq(buffer[0], bytes[1]); + ptu_uint_eq(buffer[1], bytes[2]); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_null(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + uint8_t buffer[] = { 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, NULL, 1, 0x0ull); + ptu_int_eq(status, -pte_internal); + ptu_uint_eq(buffer[0], 0xcc); + + status = pt_section_read(NULL, buffer, 1, 0x0ull); + ptu_int_eq(status, -pte_internal); + ptu_uint_eq(buffer[0], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_offset(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x1ull); + ptu_int_eq(status, 2); + ptu_uint_eq(buffer[0], bytes[2]); + ptu_uint_eq(buffer[1], bytes[3]); + 
ptu_uint_eq(buffer[2], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_truncated(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x2ull); + ptu_int_eq(status, 1); + ptu_uint_eq(buffer[0], bytes[3]); + ptu_uint_eq(buffer[1], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_from_truncated(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x2ull, 0x10ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x1ull); + ptu_int_eq(status, 1); + ptu_uint_eq(buffer[0], bytes[3]); + ptu_uint_eq(buffer[1], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_nomem(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 1, 0x3ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_overflow(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 1, + 0xffffffffffff0000ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_overflow_32bit(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 1, + 0xff00000000ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result read_nomap(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_read(sfix->section, buffer, 1, 0x0ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + + return ptu_passed(); +} + +static struct 
ptunit_result read_unmap_map(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x0ull); + ptu_int_eq(status, 2); + ptu_uint_eq(buffer[0], bytes[1]); + ptu_uint_eq(buffer[1], bytes[2]); + ptu_uint_eq(buffer[2], 0xcc); + + memset(buffer, 0xcc, sizeof(buffer)); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x0ull); + ptu_int_eq(status, -pte_nomap); + ptu_uint_eq(buffer[0], 0xcc); + ptu_uint_eq(buffer[1], 0xcc); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_section_map(sfix->section); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->section, buffer, 2, 0x0ull); + ptu_int_eq(status, 2); + ptu_uint_eq(buffer[0], bytes[1]); + ptu_uint_eq(buffer[1], bytes[2]); + ptu_uint_eq(buffer[2], 0xcc); + + status = pt_section_unmap(sfix->section); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result clone_bad_range(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0xcc }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x2ull); + ptu_ptr(sfix->section); + + errcode = pt_section_clone(&sfix->clone, sfix->section, 0x0ull, 0x2ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_section_clone(&sfix->clone, sfix->section, 0x2ull, 0x2ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result clone_head(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0xcc }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x2ull); + ptu_ptr(sfix->section); + + status = pt_section_clone(&sfix->clone, sfix->section, 0x1ull, 0x1ull); + ptu_int_eq(status, 0); + + status = pt_section_map(sfix->clone); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->clone, buffer, 2, 0x0ull); + ptu_int_eq(status, 1); + ptu_uint_eq(buffer[0], bytes[1]); + ptu_uint_eq(buffer[1], 0xcc); + + status = pt_section_unmap(sfix->clone); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result clone_tail(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0xcc }; + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int status; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x2ull); + ptu_ptr(sfix->section); + + status = pt_section_clone(&sfix->clone, sfix->section, 0x2ull, 0x1ull); + ptu_int_eq(status, 0); + + status = pt_section_map(sfix->clone); + ptu_int_eq(status, 0); + + status = pt_section_read(sfix->clone, buffer, 2, 0x0ull); + ptu_int_eq(status, 1); + ptu_uint_eq(buffer[0], bytes[2]); + ptu_uint_eq(buffer[1], 0xcc); + + status = pt_section_unmap(sfix->clone); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static int worker(void *arg) +{ + struct section_fixture *sfix; + int it, errcode; + + sfix = arg; + if (!sfix) + return -pte_internal; + + for (it = 0; it < num_work; ++it) { + uint8_t buffer[] = { 0xcc, 0xcc, 0xcc }; + int read; + + errcode = pt_section_get(sfix->section); + if (errcode < 0) + return errcode; + + errcode = pt_section_map(sfix->section); + if (errcode < 0) + goto 
out_put; + + read = pt_section_read(sfix->section, buffer, 2, 0x0ull); + if (read < 0) + goto out_unmap; + + errcode = -pte_invalid; + if ((read != 2) || (buffer[0] != 0x2) || (buffer[1] != 0x4)) + goto out_unmap; + + errcode = pt_section_unmap(sfix->section); + if (errcode < 0) + goto out_put; + + errcode = pt_section_put(sfix->section); + if (errcode < 0) + return errcode; + } + + return 0; + +out_unmap: + (void) pt_section_unmap(sfix->section); + +out_put: + (void) pt_section_put(sfix->section); + return errcode; +} + +static struct ptunit_result stress(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + +#if defined(FEATURE_THREADS) + { + int thrd; + + for (thrd = 0; thrd < num_threads; ++thrd) + ptu_test(ptunit_thrd_create, &sfix->thrd, worker, sfix); + } +#endif /* defined(FEATURE_THREADS) */ + + errcode = worker(sfix); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result cache(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + struct pt_block_cache *bcache; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + bcache = pt_section_bcache(sfix->section); + ptu_ptr(bcache); + ptu_uint_eq(bcache->nentries, sfix->section->size); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + return ptu_passed(); +} + +static struct ptunit_result cache_disabled(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + struct pt_block_cache *bcache; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + pt_section_disable_bcache(sfix->section); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result cache_enable_disable(struct section_fixture *sfix) +{ + uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }; + struct pt_block_cache *bcache; + int errcode; + + sfix_write(sfix, bytes); + + sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull); + ptu_ptr(sfix->section); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + pt_section_disable_bcache(sfix->section); + + bcache = pt_section_bcache(sfix->section); + ptu_ptr(bcache); + ptu_uint_eq(bcache->nentries, sfix->section->size); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + errcode = pt_section_map(sfix->section); + ptu_int_eq(errcode, 0); + + bcache = pt_section_bcache(sfix->section); + ptu_null(bcache); + + errcode = pt_section_unmap(sfix->section); + ptu_int_eq(errcode, 0); + + return ptu_passed(); +} + +static struct ptunit_result sfix_init(struct section_fixture *sfix) +{ + int errcode; + + sfix->section = NULL; + sfix->clone = NULL; + sfix->file = NULL; + sfix->name = NULL; + + errcode = ptunit_mkfile(&sfix->file, &sfix->name, "wb"); 
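As an aside on the worker() function above: it follows the reference-counting discipline a pt_section user is expected to observe, i.e. take a use reference, map, read, unmap, drop the reference, and unwind the earlier steps on any error. A condensed sketch of that sequence, assuming only the pt_section.h declarations already used by these tests:

static int read_two_bytes(struct pt_section *section, uint8_t *buffer)
{
	int errcode, read;

	errcode = pt_section_get(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_map(section);
	if (errcode < 0)
		goto out_put;

	read = pt_section_read(section, buffer, 2, 0x0ull);
	if (read < 0) {
		errcode = read;
		goto out_unmap;
	}

	errcode = pt_section_unmap(section);
	if (errcode < 0)
		goto out_put;

	return pt_section_put(section);

out_unmap:
	(void) pt_section_unmap(section);

out_put:
	(void) pt_section_put(section);
	return errcode;
}

The stress test runs this sequence concurrently from several threads when FEATURE_THREADS is enabled, relying on the section's internal locking and on the use and map counts that the *_overflow tests poke at.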
+ ptu_int_eq(errcode, 0); + + ptu_test(ptunit_thrd_init, &sfix->thrd); + + return ptu_passed(); +} + +static struct ptunit_result sfix_fini(struct section_fixture *sfix) +{ + int thrd; + + ptu_test(ptunit_thrd_fini, &sfix->thrd); + + for (thrd = 0; thrd < sfix->thrd.nthreads; ++thrd) + ptu_int_eq(sfix->thrd.result[thrd], 0); + + if (sfix->section) { + pt_section_put(sfix->section); + sfix->section = NULL; + } + + if (sfix->clone) { + pt_section_put(sfix->clone); + sfix->clone = NULL; + } + + if (sfix->file) { + fclose(sfix->file); + sfix->file = NULL; + + if (sfix->name) + remove(sfix->name); + } + + if (sfix->name) { + free(sfix->name); + sfix->name = NULL; + } + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct section_fixture sfix; + struct ptunit_suite suite; + + sfix.init = sfix_init; + sfix.fini = sfix_fini; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run_f(suite, create, sfix); + ptu_run_f(suite, create_bad_offset, sfix); + ptu_run_f(suite, create_truncated, sfix); + ptu_run_f(suite, create_empty, sfix); + + ptu_run(suite, clone_null); + ptu_run(suite, filename_null); + ptu_run(suite, offset_null); + ptu_run(suite, size_null); + ptu_run(suite, get_null); + ptu_run(suite, put_null); + ptu_run(suite, map_null); + ptu_run(suite, unmap_null); + ptu_run(suite, cache_null); + + ptu_run_f(suite, get_overflow, sfix); + ptu_run_f(suite, map_change, sfix); + ptu_run_f(suite, map_put, sfix); + ptu_run_f(suite, unmap_nomap, sfix); + ptu_run_f(suite, map_overflow, sfix); + ptu_run_f(suite, get_put, sfix); + ptu_run_f(suite, map_unmap, sfix); + ptu_run_f(suite, read, sfix); + ptu_run_f(suite, read_null, sfix); + ptu_run_f(suite, read_offset, sfix); + ptu_run_f(suite, read_truncated, sfix); + ptu_run_f(suite, read_from_truncated, sfix); + ptu_run_f(suite, read_nomem, sfix); + ptu_run_f(suite, read_overflow, sfix); + ptu_run_f(suite, read_overflow_32bit, sfix); + ptu_run_f(suite, read_nomap, sfix); + ptu_run_f(suite, read_unmap_map, sfix); + ptu_run_f(suite, stress, sfix); + + ptu_run_f(suite, clone_bad_range, sfix); + ptu_run_f(suite, clone_head, sfix); + ptu_run_f(suite, clone_tail, sfix); + + ptu_run_f(suite, cache, sfix); + ptu_run_f(suite, cache_disabled, sfix); + ptu_run_f(suite, cache_enable_disable, sfix); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-sync.c b/libipt/test/src/ptunit-sync.c new file mode 100644 index 0000000..a0d1753 --- /dev/null +++ b/libipt/test/src/ptunit-sync.c @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
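The main() above shows the shape that all of these test drivers share: a fixture struct carries per-test state plus init/fini callbacks, ptu_run_f() runs a test wrapped in that fixture, ptu_run() runs fixture-less tests, and the number of failed tests becomes the process exit code. A minimal sketch of a new driver in the same style; the fixture name, its field, and the test body are placeholders, only the ptunit calls are taken from the code above:

#include "ptunit.h"

/* A hypothetical fixture; the field stands in for real per-test state. */
struct my_fixture {
	int value;

	/* The test fixture initialization and finalization functions. */
	struct ptunit_result (*init)(struct my_fixture *);
	struct ptunit_result (*fini)(struct my_fixture *);
};

static struct ptunit_result mfix_init(struct my_fixture *mfix)
{
	mfix->value = 42;

	return ptu_passed();
}

static struct ptunit_result check_value(struct my_fixture *mfix)
{
	ptu_int_eq(mfix->value, 42);

	return ptu_passed();
}

int main(int argc, char **argv)
{
	struct my_fixture mfix;
	struct ptunit_suite suite;

	mfix.init = mfix_init;
	mfix.fini = NULL;

	suite = ptunit_mk_suite(argc, argv);

	ptu_run_f(suite, check_value, mfix);

	ptunit_report(&suite);
	return suite.nr_fails;
}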
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include "pt_sync.h" + +#include "intel-pt.h" + + +/* A test fixture for sync tests. */ +struct sync_fixture { + /* The trace buffer. */ + uint8_t buffer[1024]; + + /* A trace configuration. */ + struct pt_config config; + + /* The test fixture initialization and finalization functions. */ + struct ptunit_result (*init)(struct sync_fixture *); + struct ptunit_result (*fini)(struct sync_fixture *); +}; + +static struct ptunit_result sfix_init(struct sync_fixture *sfix) +{ + memset(sfix->buffer, 0xcd, sizeof(sfix->buffer)); + + memset(&sfix->config, 0, sizeof(sfix->config)); + sfix->config.size = sizeof(sfix->config); + sfix->config.begin = sfix->buffer; + sfix->config.end = sfix->buffer + sizeof(sfix->buffer); + + return ptu_passed(); +} + +static void sfix_encode_psb(uint8_t *pos) +{ + int i; + + *pos++ = pt_opc_psb; + *pos++ = pt_ext_psb; + + for (i = 0; i < pt_psb_repeat_count; ++i) { + *pos++ = pt_psb_hi; + *pos++ = pt_psb_lo; + } +} + + +static struct ptunit_result sync_fwd_null(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + errcode = pt_sync_forward(NULL, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_sync_forward(&sync, NULL, &sfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_sync_forward(&sync, sfix->config.begin, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_null(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + errcode = pt_sync_backward(NULL, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_sync_backward(&sync, NULL, &sfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_sync_backward(&sync, sfix->config.begin, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd_empty(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix->config.end = sfix->config.begin; + + errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_empty(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix->config.end = sfix->config.begin; + + errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd_none(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_none(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd_here(struct 
sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin); + + errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(sync, sfix->config.begin); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_here(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.end - ptps_psb); + + errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(sync, sfix->config.end - ptps_psb); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin + 0x23); + + errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(sync, sfix->config.begin + 0x23); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin + 0x23); + + errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config); + ptu_int_eq(errcode, 0); + ptu_ptr_eq(sync, sfix->config.begin + 0x23); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd_past(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin); + + errcode = pt_sync_forward(&sync, sfix->config.begin + ptps_psb, + &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_past(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.end - ptps_psb); + + errcode = pt_sync_backward(&sync, sfix->config.end - ptps_psb, + &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_fwd_cutoff(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin); + sfix_encode_psb(sfix->config.end - ptps_psb); + sfix->config.begin += 1; + sfix->config.end -= 1; + + errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +static struct ptunit_result sync_bwd_cutoff(struct sync_fixture *sfix) +{ + const uint8_t *sync; + int errcode; + + sfix_encode_psb(sfix->config.begin); + sfix_encode_psb(sfix->config.end - ptps_psb); + sfix->config.begin += 1; + sfix->config.end -= 1; + + errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config); + ptu_int_eq(errcode, -pte_eos); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct sync_fixture sfix; + struct ptunit_suite suite; + + sfix.init = sfix_init; + sfix.fini = NULL; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run_f(suite, sync_fwd_null, sfix); + ptu_run_f(suite, sync_bwd_null, sfix); + + ptu_run_f(suite, sync_fwd_empty, sfix); + ptu_run_f(suite, sync_bwd_empty, sfix); + + ptu_run_f(suite, sync_fwd_none, sfix); + ptu_run_f(suite, sync_bwd_none, sfix); + + ptu_run_f(suite, sync_fwd_here, sfix); + ptu_run_f(suite, sync_bwd_here, sfix); + + ptu_run_f(suite, sync_fwd, sfix); + ptu_run_f(suite, sync_bwd, sfix); + + ptu_run_f(suite, sync_fwd_past, sfix); + ptu_run_f(suite, sync_bwd_past, sfix); + + ptu_run_f(suite, sync_fwd_cutoff, sfix); + ptu_run_f(suite, sync_bwd_cutoff, sfix); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-time.c b/libipt/test/src/ptunit-time.c 
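The sync tests above only care about whether a PSB pattern can be found between config.begin and config.end. As a rough illustration of what that search amounts to, here is a naive scanner for the 16-byte pattern written by sfix_encode_psb(), assuming the usual 02 82 ... PSB byte encoding; the library's pt_sync_forward()/pt_sync_backward() are more involved, so treat this purely as a sketch:

#include <stdint.h>
#include <string.h>

/* The raw PSB pattern, assuming the usual 16-byte 02 82 ... encoding. */
static const uint8_t psb_pattern[] = {
	0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
	0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82
};

/* Scan forward for a PSB in [@begin, @end).
 *
 * Returns a pointer to the first PSB, NULL if there is none.
 */
static const uint8_t *find_psb(const uint8_t *begin, const uint8_t *end)
{
	const uint8_t *pos;

	if (!begin || !end || end < begin)
		return NULL;

	for (pos = begin; (size_t) (end - pos) >= sizeof(psb_pattern); ++pos) {
		if (memcmp(pos, psb_pattern, sizeof(psb_pattern)) == 0)
			return pos;
	}

	return NULL;
}

Seen this way, the *_cutoff tests expect -pte_eos because trimming one byte from either end of the buffer leaves no complete PSB pattern fully inside it.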
new file mode 100644 index 0000000..a11f0cf --- /dev/null +++ b/libipt/test/src/ptunit-time.c @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2014-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_time.h" + +#include "intel-pt.h" + +#include "ptunit.h" + + +/* A time unit test fixture. */ + +struct time_fixture { + /* The configuration to use. */ + struct pt_config config; + + /* The calibration to use. */ + struct pt_time_cal tcal; + + /* The time struct to update. */ + struct pt_time time; + + /* The test fixture initialization and finalization functions. 
*/ + struct ptunit_result (*init)(struct time_fixture *); + struct ptunit_result (*fini)(struct time_fixture *); +}; + +static struct ptunit_result tfix_init(struct time_fixture *tfix) +{ + memset(&tfix->config, 0, sizeof(tfix->config)); + tfix->config.size = sizeof(tfix->config); + tfix->config.cpuid_0x15_eax = 2; + tfix->config.cpuid_0x15_ebx = 1; + tfix->config.mtc_freq = 4; + + pt_tcal_init(&tfix->tcal); + pt_tcal_set_fcr(&tfix->tcal, 0x2ull << pt_tcal_fcr_shr); + + pt_time_init(&tfix->time); + + return ptu_passed(); +} + + +static struct ptunit_result tsc_null(struct time_fixture *tfix) +{ + struct pt_packet_tsc packet; + int errcode; + + errcode = pt_time_update_tsc(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_tsc(&tfix->time, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result cbr_null(struct time_fixture *tfix) +{ + struct pt_packet_cbr packet; + int errcode; + + errcode = pt_time_update_cbr(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_cbr(&tfix->time, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result tma_null(struct time_fixture *tfix) +{ + struct pt_packet_tma packet; + int errcode; + + errcode = pt_time_update_tma(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_tma(&tfix->time, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_tma(&tfix->time, &packet, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result mtc_null(struct time_fixture *tfix) +{ + struct pt_packet_mtc packet; + int errcode; + + errcode = pt_time_update_mtc(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_mtc(&tfix->time, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_mtc(&tfix->time, &packet, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result cyc_null(struct time_fixture *tfix) +{ + struct pt_packet_cyc packet; + int errcode; + + errcode = pt_time_update_cyc(NULL, &packet, &tfix->config, 0ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_cyc(&tfix->time, NULL, &tfix->config, 0ull); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_update_cyc(&tfix->time, &packet, NULL, 0ull); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result query_tsc_null(struct time_fixture *tfix) +{ + uint64_t tsc; + int errcode; + + errcode = pt_time_query_tsc(NULL, NULL, NULL, &tfix->time); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_query_tsc(&tsc, NULL, NULL, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result query_tsc_none(struct time_fixture *tfix) +{ + uint64_t tsc; + int errcode; + + errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time); + ptu_int_eq(errcode, -pte_no_time); + + return ptu_passed(); +} + +static struct ptunit_result query_cbr_null(struct time_fixture *tfix) +{ + uint32_t cbr; + int errcode; + + errcode = pt_time_query_cbr(NULL, &tfix->time); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_time_query_cbr(&cbr, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result query_cbr_none(struct time_fixture 
*tfix) +{ + uint32_t cbr; + int errcode; + + errcode = pt_time_query_cbr(&cbr, &tfix->time); + ptu_int_eq(errcode, -pte_no_cbr); + + return ptu_passed(); +} + +static struct ptunit_result tcal_cbr_null(struct time_fixture *tfix) +{ + struct pt_packet_cbr packet; + int errcode; + + errcode = pt_tcal_update_cbr(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result tcal_mtc_null(struct time_fixture *tfix) +{ + struct pt_packet_mtc packet; + int errcode; + + errcode = pt_tcal_update_mtc(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_tcal_update_mtc(&tfix->tcal, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_tcal_update_mtc(&tfix->tcal, &packet, NULL); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result tcal_cyc_null(struct time_fixture *tfix) +{ + struct pt_packet_cyc packet; + int errcode; + + errcode = pt_tcal_update_cyc(NULL, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + errcode = pt_tcal_update_cyc(&tfix->tcal, NULL, &tfix->config); + ptu_int_eq(errcode, -pte_internal); + + return ptu_passed(); +} + +static struct ptunit_result tsc(struct time_fixture *tfix) +{ + struct pt_packet_tsc packet; + uint64_t tsc; + uint32_t lost_mtc, lost_cyc; + int errcode; + + packet.tsc = 0xdedededeull; + + errcode = pt_time_update_tsc(&tfix->time, &packet, &tfix->config); + ptu_int_eq(errcode, 0); + + errcode = pt_time_query_tsc(&tsc, &lost_mtc, &lost_cyc, &tfix->time); + ptu_int_eq(errcode, 0); + + ptu_uint_eq(tsc, 0xdedededeull); + ptu_uint_eq(lost_mtc, 0); + ptu_uint_eq(lost_cyc, 0); + + return ptu_passed(); +} + +static struct ptunit_result cbr(struct time_fixture *tfix) +{ + struct pt_packet_cbr packet; + uint32_t cbr; + int errcode; + + packet.ratio = 0x38; + + errcode = pt_time_update_cbr(&tfix->time, &packet, &tfix->config); + ptu_int_eq(errcode, 0); + + errcode = pt_time_query_cbr(&cbr, &tfix->time); + ptu_int_eq(errcode, 0); + + ptu_uint_eq(cbr, 0x38); + + return ptu_passed(); +} + +static struct ptunit_result tma(struct time_fixture *tfix) +{ + struct pt_packet_tma packet; + int errcode; + + packet.ctc = 0xdc; + packet.fc = 0xf; + + errcode = pt_time_update_tma(&tfix->time, &packet, &tfix->config); + ptu_int_eq(errcode, -pte_bad_context); + + return ptu_passed(); +} + +static struct ptunit_result mtc(struct time_fixture *tfix) +{ + struct pt_packet_mtc packet; + uint64_t tsc; + int errcode; + + packet.ctc = 0xdc; + + errcode = pt_time_update_mtc(&tfix->time, &packet, &tfix->config); + ptu_int_eq(errcode, 0); + + errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time); + ptu_int_eq(errcode, -pte_no_time); + + return ptu_passed(); +} + +static struct ptunit_result cyc(struct time_fixture *tfix) +{ + struct pt_packet_cyc packet; + uint64_t fcr, tsc; + int errcode; + + errcode = pt_tcal_fcr(&fcr, &tfix->tcal); + ptu_int_eq(errcode, 0); + + packet.value = 0xdc; + + errcode = pt_time_update_cyc(&tfix->time, &packet, &tfix->config, fcr); + ptu_int_eq(errcode, 0); + + errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time); + ptu_int_eq(errcode, -pte_no_time); + + return ptu_passed(); +} + + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + struct time_fixture tfix; + + suite = ptunit_mk_suite(argc, argv); + + tfix.init = tfix_init; + tfix.fini = NULL; + + ptu_run_f(suite, tsc_null, tfix); + ptu_run_f(suite, cbr_null, tfix); + ptu_run_f(suite, tma_null, tfix); + 
ptu_run_f(suite, mtc_null, tfix); + ptu_run_f(suite, cyc_null, tfix); + + ptu_run_f(suite, query_tsc_null, tfix); + ptu_run_f(suite, query_tsc_none, tfix); + ptu_run_f(suite, query_cbr_null, tfix); + ptu_run_f(suite, query_cbr_none, tfix); + + ptu_run_f(suite, tcal_cbr_null, tfix); + ptu_run_f(suite, tcal_mtc_null, tfix); + ptu_run_f(suite, tcal_cyc_null, tfix); + + ptu_run_f(suite, tsc, tfix); + ptu_run_f(suite, cbr, tfix); + ptu_run_f(suite, tma, tfix); + ptu_run_f(suite, mtc, tfix); + ptu_run_f(suite, cyc, tfix); + + /* The bulk is covered in ptt tests. */ + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/libipt/test/src/ptunit-tnt_cache.c b/libipt/test/src/ptunit-tnt_cache.c new file mode 100644 index 0000000..191eab3 --- /dev/null +++ b/libipt/test/src/ptunit-tnt_cache.c @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ptunit.h" + +#include "pt_tnt_cache.h" + +#include "intel-pt.h" + +#include + + +static struct ptunit_result init(void) +{ + struct pt_tnt_cache tnt_cache; + + memset(&tnt_cache, 0xcd, sizeof(tnt_cache)); + + pt_tnt_cache_init(&tnt_cache); + + ptu_uint_eq(tnt_cache.tnt, 0ull); + ptu_uint_eq(tnt_cache.index, 0ull); + + return ptu_passed(); +} + +static struct ptunit_result init_null(void) +{ + pt_tnt_cache_init(NULL); + + return ptu_passed(); +} + +static struct ptunit_result is_empty_initial(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + pt_tnt_cache_init(&tnt_cache); + + status = pt_tnt_cache_is_empty(&tnt_cache); + ptu_int_eq(status, 1); + + return ptu_passed(); +} + +static struct ptunit_result is_empty_no(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + tnt_cache.index = 1ull; + + status = pt_tnt_cache_is_empty(&tnt_cache); + ptu_int_eq(status, 0); + + return ptu_passed(); +} + +static struct ptunit_result is_empty_yes(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + tnt_cache.index = 0ull; + + status = pt_tnt_cache_is_empty(&tnt_cache); + ptu_int_eq(status, 1); + + return ptu_passed(); +} + +static struct ptunit_result is_empty_null(void) +{ + int status; + + status = pt_tnt_cache_is_empty(NULL); + ptu_int_eq(status, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result query_taken(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + tnt_cache.tnt = 1ull; + tnt_cache.index = 1ull; + + status = pt_tnt_cache_query(&tnt_cache); + ptu_int_eq(status, 1); + ptu_uint_eq(tnt_cache.index, 0); + + return ptu_passed(); +} + +static struct ptunit_result query_not_taken(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + tnt_cache.tnt = 0ull; + tnt_cache.index = 1ull; + + status = pt_tnt_cache_query(&tnt_cache); + ptu_int_eq(status, 0); + ptu_uint_eq(tnt_cache.index, 0); + + return ptu_passed(); +} + +static struct ptunit_result query_empty(void) +{ + struct pt_tnt_cache tnt_cache; + int status; + + tnt_cache.index = 0ull; + + status = pt_tnt_cache_query(&tnt_cache); + ptu_int_eq(status, -pte_bad_query); + + return ptu_passed(); +} + +static struct ptunit_result query_null(void) +{ + int status; + + status = pt_tnt_cache_query(NULL); + ptu_int_eq(status, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result update_tnt(void) +{ + struct pt_tnt_cache tnt_cache; + struct pt_packet_tnt packet; + int errcode; + + pt_tnt_cache_init(&tnt_cache); + + packet.bit_size = 4ull; + packet.payload = 8ull; + + errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL); + ptu_int_eq(errcode, 0); + ptu_uint_eq(tnt_cache.tnt, 8ull); + ptu_uint_eq(tnt_cache.index, 1ull << 3); + + return ptu_passed(); +} + +static struct ptunit_result update_tnt_not_empty(void) +{ + struct pt_tnt_cache tnt_cache; + struct pt_packet_tnt packet; + int errcode; + + tnt_cache.tnt = 42ull; + tnt_cache.index = 12ull; + + errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL); + ptu_int_eq(errcode, -pte_bad_context); + ptu_uint_eq(tnt_cache.tnt, 42ull); + ptu_uint_eq(tnt_cache.index, 12ull); + + return ptu_passed(); +} + +static struct ptunit_result update_tnt_null_tnt(void) +{ + struct pt_packet_tnt packet; + int errcode; + + errcode = pt_tnt_cache_update_tnt(NULL, &packet, NULL); + ptu_int_eq(errcode, -pte_invalid); + + return ptu_passed(); +} + +static struct ptunit_result update_tnt_null_packet(void) +{ + struct pt_tnt_cache tnt_cache; + int errcode; + + tnt_cache.tnt = 42ull; + tnt_cache.index = 12ull; + + errcode = 
pt_tnt_cache_update_tnt(&tnt_cache, NULL, NULL); + ptu_int_eq(errcode, -pte_invalid); + ptu_uint_eq(tnt_cache.tnt, 42ull); + ptu_uint_eq(tnt_cache.index, 12ull); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, init); + ptu_run(suite, init_null); + ptu_run(suite, is_empty_initial); + ptu_run(suite, is_empty_no); + ptu_run(suite, is_empty_yes); + ptu_run(suite, is_empty_null); + ptu_run(suite, query_taken); + ptu_run(suite, query_not_taken); + ptu_run(suite, query_empty); + ptu_run(suite, query_null); + ptu_run(suite, update_tnt); + ptu_run(suite, update_tnt_not_empty); + ptu_run(suite, update_tnt_null_tnt); + ptu_run(suite, update_tnt_null_packet); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/ptdump/CMakeLists.txt b/ptdump/CMakeLists.txt new file mode 100644 index 0000000..c885a5b --- /dev/null +++ b/ptdump/CMakeLists.txt @@ -0,0 +1,51 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +include_directories( + include + ../libipt/internal/include +) + +set(PTDUMP_FILES + src/ptdump.c + ../libipt/src/pt_last_ip.c + ../libipt/src/pt_cpu.c + ../libipt/src/pt_time.c +) + +if (CMAKE_HOST_UNIX) + set(PTDUMP_FILES ${PTDUMP_FILES} ../libipt/src/posix/pt_cpuid.c) +endif (CMAKE_HOST_UNIX) + +if (CMAKE_HOST_WIN32) + set(PTDUMP_FILES ${PTDUMP_FILES} ../libipt/src/windows/pt_cpuid.c) +endif (CMAKE_HOST_WIN32) + +add_executable(ptdump + ${PTDUMP_FILES} +) + +target_link_libraries(ptdump libipt) diff --git a/ptdump/src/ptdump.c b/ptdump/src/ptdump.c new file mode 100644 index 0000000..b2d9576 --- /dev/null +++ b/ptdump/src/ptdump.c @@ -0,0 +1,1533 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
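The tests above pin down the pt_tnt_cache representation: tnt holds the raw taken/not-taken bits and index is a single-bit cursor on the most recent unconsumed bit, with index == 0 meaning the cache is empty. A small self-contained model of that behaviour, using the same numbers as the update_tnt test (4 bits, payload 8); the real implementation is in pt_tnt_cache.h, this only mirrors what the assertions check:

#include <stdint.h>
#include <stdio.h>

struct tnt_model {
	uint64_t tnt;	/* The taken/not-taken bits. */
	uint64_t index;	/* A one-bit cursor; zero means the cache is empty. */
};

/* Fill the model; assumes 1 <= @bit_size <= 64. */
static void model_update(struct tnt_model *cache, uint64_t payload,
			 uint8_t bit_size)
{
	cache->tnt = payload;
	cache->index = 1ull << (bit_size - 1);
}

/* Consume the next bit; returns non-zero for taken, zero for not taken. */
static int model_query(struct tnt_model *cache)
{
	int taken;

	taken = (cache->tnt & cache->index) != 0;
	cache->index >>= 1;

	return taken;
}

int main(void)
{
	struct tnt_model cache;

	model_update(&cache, 8ull, 4);	/* tnt = 8, index = 1 << 3 */

	while (cache.index)
		printf("%s\n", model_query(&cache) ? "taken" : "not taken");

	return 0;
}

Running the model prints "taken" once followed by "not taken" three times, which is consistent with the query_taken and query_not_taken assertions above.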
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pt_cpu.h" +#include "pt_last_ip.h" +#include "pt_time.h" + +#include "intel-pt.h" + +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +# define snprintf _snprintf_c +#endif + + +struct ptdump_options { + /* Show the current offset in the trace stream. */ + uint32_t show_offset:1; + + /* Show raw packet bytes. */ + uint32_t show_raw_bytes:1; + + /* Show last IP for packets with IP payloads. */ + uint32_t show_last_ip:1; + + /* Show the execution mode on mode.exec. */ + uint32_t show_exec_mode:1; + + /* Keep track of time. */ + uint32_t track_time:1; + + /* Show the estimated TSC for timing related packets. */ + uint32_t show_time:1; + + /* Show time calibration. */ + uint32_t show_tcal:1; + + /* Show timing information as delta to the previous value. */ + uint32_t show_time_as_delta:1; + + /* Quiet mode: Don't print anything but errors. */ + uint32_t quiet:1; + + /* Don't show PAD packets. */ + uint32_t no_pad:1; + + /* Do not try to sync the decoder. */ + uint32_t no_sync:1; + + /* Do not calibrate timing. */ + uint32_t no_tcal:1; + + /* Do not expect wall-clock time. */ + uint32_t no_wall_clock:1; + + /* Don't show timing packets. */ + uint32_t no_timing:1; + + /* Don't show CYC packets and ignore them when tracking time. */ + uint32_t no_cyc:1; +}; + +struct ptdump_buffer { + /* The trace offset. */ + char offset[17]; + + /* The raw packet bytes. */ + char raw[33]; + + /* The packet opcode. */ + char opcode[10]; + + union { + /* The standard packet payload. */ + char standard[25]; + + /* An extended packet payload. */ + char extended[48]; + } payload; + + /* The tracking information. */ + struct { + /* The tracking identifier. */ + char id[5]; + + /* The tracking information. */ + char payload[17]; + } tracking; + + /* A flag telling whether an extended payload is used. */ + uint32_t use_ext_payload:1; + + /* A flag telling whether to skip printing this buffer. */ + uint32_t skip:1; + + /* A flag telling whether to skip printing the time. */ + uint32_t skip_time:1; + + /* A flag telling whether to skip printing the calibration. */ + uint32_t skip_tcal:1; +}; + +struct ptdump_tracking { + /* Track last-ip. */ + struct pt_last_ip last_ip; + + /* Track time calibration. 
*/ + struct pt_time_cal tcal; + + /* Track time. */ + struct pt_time time; + + /* The last estimated TSC. */ + uint64_t tsc; + + /* The last calibration value. */ + uint64_t fcr; + + /* Header vs. normal decode. Set if decoding PSB+. */ + uint32_t in_header:1; +}; + +static int usage(const char *name) +{ + fprintf(stderr, + "%s: [<options>] <ptfile>. Use --help or -h for help.\n", + name); + return -1; +} + +static int no_file_error(const char *name) +{ + fprintf(stderr, "%s: No processor trace file specified.\n", name); + return -1; +} + +static int unknown_option_error(const char *arg, const char *name) +{ + fprintf(stderr, "%s: unknown option: %s.\n", name, arg); + return -1; +} + +static int help(const char *name) +{ + printf("usage: %s [<options>] <ptfile>[:<from>[-<to>]]\n\n", name); + printf("options:\n"); + printf(" --help|-h this text.\n"); + printf(" --version display version information and exit.\n"); + printf(" --no-sync don't try to sync to the first PSB, assume a valid\n"); + printf(" sync point at the beginning of the trace.\n"); + printf(" --quiet don't print anything but errors.\n"); + printf(" --no-pad don't show PAD packets.\n"); + printf(" --no-timing don't show timing packets.\n"); + printf(" --no-cyc don't show CYC packets and ignore them when tracking time.\n"); + printf(" --no-offset don't show the offset as the first column.\n"); + printf(" --raw show raw packet bytes.\n"); + printf(" --lastip show last IP updates on packets with IP payloads.\n"); + printf(" --exec-mode show the current execution mode on mode.exec packets.\n"); + printf(" --time show the estimated TSC on timing packets.\n"); + printf(" --tcal show time calibration information.\n"); + printf(" --time-delta show timing information as delta.\n"); + printf(" --no-tcal skip timing calibration.\n"); + printf(" this will result in errors when CYC packets are encountered.\n"); + printf(" --no-wall-clock suppress the no-time error and print relative time.\n"); + printf(" --cpu none|auto|f/m[/s] set cpu to the given value and decode according to:\n"); + printf(" none spec (default)\n"); + printf(" auto current cpu\n"); + printf(" f/m[/s] family/model[/stepping]\n"); + printf(" --mtc-freq <n> set the MTC frequency (IA32_RTIT_CTL[17:14]) to <n>.\n"); + printf(" --nom-freq <n> set the nominal frequency (MSR_PLATFORM_INFO[15:8]) to <n>.\n"); + printf(" --cpuid-0x15.eax <val> set the value of cpuid[0x15].eax.\n"); + printf(" --cpuid-0x15.ebx <val> set the value of cpuid[0x15].ebx.\n"); + printf(" <ptfile>[:<from>[-<to>]] load the processor trace data from <ptfile>;\n"); + + return 0; +} + +static int version(const char *name) +{ + struct pt_version v = pt_library_version(); + + printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n", + name, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD, + PT_VERSION_EXT, v.major, v.minor, v.build, v.ext); + return 0; +} + +static int parse_range(const char *arg, uint64_t *begin, uint64_t *end) +{ + char *rest; + + if (!arg || !*arg) + return 0; + + errno = 0; + *begin = strtoull(arg, &rest, 0); + if (errno) + return -1; + + if (!*rest) + return 1; + + if (*rest != '-') + return -1; + + *end = strtoull(rest+1, &rest, 0); + if (errno || *rest) + return -1; + + return 2; +} + +/* Preprocess a filename argument. + * + * A filename may optionally be followed by a file offset or a file range + * argument separated by ':'. Split the original argument into the filename + * part and the offset/range part. + * + * If no end address is specified, set @size to zero. + * If no offset is specified, set @offset to zero.
+ * + * Returns zero on success, a negative error code otherwise. + */ +static int preprocess_filename(char *filename, uint64_t *offset, uint64_t *size) +{ + uint64_t begin, end; + char *range; + int parts; + + if (!filename || !offset || !size) + return -pte_internal; + + /* Search from the end as the filename may also contain ':'. */ + range = strrchr(filename, ':'); + if (!range) { + *offset = 0ull; + *size = 0ull; + + return 0; + } + + /* Let's try to parse an optional range suffix. + * + * If we can, remove it from the filename argument. + * If we can not, assume that the ':' is part of the filename, e.g. a + * drive letter on Windows. + */ + parts = parse_range(range + 1, &begin, &end); + if (parts <= 0) { + *offset = 0ull; + *size = 0ull; + + return 0; + } + + if (parts == 1) { + *offset = begin; + *size = 0ull; + + *range = 0; + + return 0; + } + + if (parts == 2) { + if (end <= begin) + return -pte_invalid; + + *offset = begin; + *size = end - begin; + + *range = 0; + + return 0; + } + + return -pte_internal; +} + +static int load_file(uint8_t **buffer, size_t *psize, const char *filename, + uint64_t offset, uint64_t size, const char *prog) +{ + uint8_t *content; + size_t read; + FILE *file; + long fsize, begin, end; + int errcode; + + if (!buffer || !psize || !filename || !prog) { + fprintf(stderr, "%s: internal error.\n", prog ? prog : ""); + return -1; + } + + errno = 0; + file = fopen(filename, "rb"); + if (!file) { + fprintf(stderr, "%s: failed to open %s: %d.\n", + prog, filename, errno); + return -1; + } + + errcode = fseek(file, 0, SEEK_END); + if (errcode) { + fprintf(stderr, "%s: failed to determine size of %s: %d.\n", + prog, filename, errno); + goto err_file; + } + + fsize = ftell(file); + if (fsize < 0) { + fprintf(stderr, "%s: failed to determine size of %s: %d.\n", + prog, filename, errno); + goto err_file; + } + + begin = (long) offset; + if (((uint64_t) begin != offset) || (fsize <= begin)) { + fprintf(stderr, + "%s: bad offset 0x%" PRIx64 " into %s.\n", + prog, offset, filename); + goto err_file; + } + + end = fsize; + if (size) { + uint64_t range_end; + + range_end = offset + size; + if ((uint64_t) end < range_end) { + fprintf(stderr, + "%s: bad range 0x%" PRIx64 " in %s.\n", + prog, range_end, filename); + goto err_file; + } + + end = (long) range_end; + } + + fsize = end - begin; + + content = malloc(fsize); + if (!content) { + fprintf(stderr, "%s: failed to allocated memory %s.\n", + prog, filename); + goto err_file; + } + + errcode = fseek(file, begin, SEEK_SET); + if (errcode) { + fprintf(stderr, "%s: failed to load %s: %d.\n", + prog, filename, errno); + goto err_content; + } + + read = fread(content, fsize, 1, file); + if (read != 1) { + fprintf(stderr, "%s: failed to load %s: %d.\n", + prog, filename, errno); + goto err_content; + } + + fclose(file); + + *buffer = content; + *psize = fsize; + + return 0; + +err_content: + free(content); + +err_file: + fclose(file); + return -1; +} + +static int load_pt(struct pt_config *config, const char *filename, + uint64_t foffset, uint64_t fsize, const char *prog) +{ + uint8_t *buffer; + size_t size; + int errcode; + + errcode = load_file(&buffer, &size, filename, foffset, fsize, prog); + if (errcode < 0) + return errcode; + + config->begin = buffer; + config->end = buffer + size; + + return 0; +} + +static int diag(const char *errstr, uint64_t offset, int errcode) +{ + if (errcode) + printf("[%" PRIx64 ": %s: %s]\n", offset, errstr, + pt_errstr(pt_errcode(errcode))); + else + printf("[%" PRIx64 ": %s]\n", offset, 
errstr); + + return errcode; +} + +static void ptdump_tracking_init(struct ptdump_tracking *tracking) +{ + if (!tracking) + return; + + pt_last_ip_init(&tracking->last_ip); + pt_tcal_init(&tracking->tcal); + pt_time_init(&tracking->time); + + tracking->tsc = 0ull; + tracking->fcr = 0ull; + tracking->in_header = 0; +} + +static void ptdump_tracking_reset(struct ptdump_tracking *tracking) +{ + if (!tracking) + return; + + pt_last_ip_init(&tracking->last_ip); + pt_tcal_init(&tracking->tcal); + pt_time_init(&tracking->time); + + tracking->tsc = 0ull; + tracking->fcr = 0ull; + tracking->in_header = 0; +} + +static void ptdump_tracking_fini(struct ptdump_tracking *tracking) +{ + (void) tracking; + + /* Nothing to do. */ +} + +#define print_field(field, ...) \ + do { \ + /* Avoid partial overwrites. */ \ + memset(field, 0, sizeof(field)); \ + snprintf(field, sizeof(field), __VA_ARGS__); \ + } while (0) + + +static int print_buffer(struct ptdump_buffer *buffer, uint64_t offset, + const struct ptdump_options *options) +{ + const char *sep; + + if (!buffer) + return diag("error printing buffer", offset, -pte_internal); + + if (buffer->skip || options->quiet) + return 0; + + /* Make sure the first column starts at the beginning of the line - no + * matter what column is first. + */ + sep = ""; + + if (options->show_offset) { + printf("%-*s", (int) sizeof(buffer->offset), buffer->offset); + sep = " "; + } + + if (buffer->raw[0]) { + printf("%s%-*s", sep, (int) sizeof(buffer->raw), buffer->raw); + sep = " "; + } + + if (buffer->payload.standard[0]) + printf("%s%-*s", sep, (int) sizeof(buffer->opcode), + buffer->opcode); + else + printf("%s%s", sep, buffer->opcode); + + /* We printed at least one column. From this point on, we don't need + * the separator any longer. 
+ */ + + if (buffer->use_ext_payload) + printf(" %s", buffer->payload.extended); + else if (buffer->tracking.id[0]) { + printf(" %-*s", (int) sizeof(buffer->payload.standard), + buffer->payload.standard); + + printf(" %-*s", (int) sizeof(buffer->tracking.id), + buffer->tracking.id); + printf("%s", buffer->tracking.payload); + } else if (buffer->payload.standard[0]) + printf(" %s", buffer->payload.standard); + + printf("\n"); + return 0; +} + +static int print_raw(struct ptdump_buffer *buffer, uint64_t offset, + const struct pt_packet *packet, + const struct pt_config *config) +{ + const uint8_t *begin, *end; + char *bbegin, *bend; + + if (!buffer || !packet) + return diag("error printing packet", offset, -pte_internal); + + begin = config->begin + offset; + end = begin + packet->size; + + if (config->end < end) + return diag("bad packet size", offset, -pte_bad_packet); + + bbegin = buffer->raw; + bend = bbegin + sizeof(buffer->raw); + + for (; begin < end; ++begin) { + char *pos; + + pos = bbegin; + bbegin += 2; + + if (bend <= bbegin) + return diag("truncating raw packet", offset, 0); + + sprintf(pos, "%02x", *begin); + } + + return 0; +} + +static int track_last_ip(struct ptdump_buffer *buffer, + struct pt_last_ip *last_ip, uint64_t offset, + const struct pt_packet_ip *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + uint64_t ip; + int errcode; + + if (!buffer || !options) + return diag("error tracking last-ip", offset, -pte_internal); + + print_field(buffer->tracking.id, "ip"); + + errcode = pt_last_ip_update_ip(last_ip, packet, config); + if (errcode < 0) { + print_field(buffer->tracking.payload, ""); + + return diag("error tracking last-ip", offset, errcode); + } + + errcode = pt_last_ip_query(&ip, last_ip); + if (errcode < 0) { + if (errcode == -pte_ip_suppressed) + print_field(buffer->tracking.payload, ""); + else { + print_field(buffer->tracking.payload, ""); + + return diag("error tracking last-ip", offset, errcode); + } + } else + print_field(buffer->tracking.payload, "%016" PRIx64, ip); + + return 0; +} + + +static int print_time(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct ptdump_options *options) +{ + uint64_t tsc; + int errcode; + + if (!tracking || !options) + return diag("error printing time", offset, -pte_internal); + + print_field(buffer->tracking.id, "tsc"); + + errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tracking->time); + if (errcode < 0) { + switch (-errcode) { + case pte_no_time: + if (options->no_wall_clock) + break; + + /* Fall through. 
*/ + default: + diag("error printing time", offset, errcode); + print_field(buffer->tracking.payload, ""); + return errcode; + } + } + + if (options->show_time_as_delta) { + uint64_t old_tsc; + + old_tsc = tracking->tsc; + if (old_tsc <= tsc) + print_field(buffer->tracking.payload, "+%" PRIx64, + tsc - old_tsc); + else + print_field(buffer->tracking.payload, "-%" PRIx64, + old_tsc - tsc); + + tracking->tsc = tsc; + } else + print_field(buffer->tracking.payload, "%016" PRIx64, tsc); + + return 0; +} + +static int print_tcal(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct ptdump_options *options) +{ + uint64_t fcr; + double dfcr; + int errcode; + + if (!tracking || !options) + return diag("error printing time", offset, -pte_internal); + + print_field(buffer->tracking.id, "fcr"); + + errcode = pt_tcal_fcr(&fcr, &tracking->tcal); + if (errcode < 0) { + print_field(buffer->tracking.payload, ""); + return diag("error printing time", offset, errcode); + } + + /* We print fcr as double to account for the shift. */ + dfcr = (double) fcr; + dfcr /= (double) (1ull << pt_tcal_fcr_shr); + + if (options->show_time_as_delta) { + uint64_t old_fcr; + double dold_fcr; + + old_fcr = tracking->fcr; + + /* We print fcr as double to account for the shift. */ + dold_fcr = (double) old_fcr; + dold_fcr /= (double) (1ull << pt_tcal_fcr_shr); + + if (old_fcr <= fcr) + print_field(buffer->tracking.payload, "+%.3f", + dfcr - dold_fcr); + else + print_field(buffer->tracking.payload, "-%.3f", + dold_fcr - dfcr); + + tracking->fcr = fcr; + } else + print_field(buffer->tracking.payload, "%.3f", dfcr); + + return 0; +} + +static int track_time(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct ptdump_options *options) +{ + if (!tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + if (options->show_tcal && !buffer->skip_tcal) + print_tcal(buffer, tracking, offset, options); + + if (options->show_time && !buffer->skip_time) + print_time(buffer, tracking, offset, options); + + return 0; +} + +static int track_tsc(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct pt_packet_tsc *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + int errcode; + + if (!buffer || !tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + if (!options->no_tcal) { + errcode = tracking->in_header ? + pt_tcal_header_tsc(&tracking->tcal, packet, config) : + pt_tcal_update_tsc(&tracking->tcal, packet, config); + if (errcode < 0) + diag("error calibrating time", offset, errcode); + } + + errcode = pt_time_update_tsc(&tracking->time, packet, config); + if (errcode < 0) + diag("error updating time", offset, errcode); + + return track_time(buffer, tracking, offset, options); +} + +static int track_cbr(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct pt_packet_cbr *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + int errcode; + + if (!buffer || !tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + if (!options->no_tcal) { + errcode = tracking->in_header ? 
+ pt_tcal_header_cbr(&tracking->tcal, packet, config) : + pt_tcal_update_cbr(&tracking->tcal, packet, config); + if (errcode < 0) + diag("error calibrating time", offset, errcode); + } + + errcode = pt_time_update_cbr(&tracking->time, packet, config); + if (errcode < 0) + diag("error updating time", offset, errcode); + + /* There is no timing update at this packet. */ + buffer->skip_time = 1; + + return track_time(buffer, tracking, offset, options); +} + +static int track_tma(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct pt_packet_tma *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + int errcode; + + if (!buffer || !tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + if (!options->no_tcal) { + errcode = pt_tcal_update_tma(&tracking->tcal, packet, config); + if (errcode < 0) + diag("error calibrating time", offset, errcode); + } + + errcode = pt_time_update_tma(&tracking->time, packet, config); + if (errcode < 0) + diag("error updating time", offset, errcode); + + /* There is no calibration update at this packet. */ + buffer->skip_tcal = 1; + + return track_time(buffer, tracking, offset, options); +} + +static int track_mtc(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct pt_packet_mtc *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + int errcode; + + if (!buffer || !tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + if (!options->no_tcal) { + errcode = pt_tcal_update_mtc(&tracking->tcal, packet, config); + if (errcode < 0) + diag("error calibrating time", offset, errcode); + } + + errcode = pt_time_update_mtc(&tracking->time, packet, config); + if (errcode < 0) + diag("error updating time", offset, errcode); + + return track_time(buffer, tracking, offset, options); +} + +static int track_cyc(struct ptdump_buffer *buffer, + struct ptdump_tracking *tracking, uint64_t offset, + const struct pt_packet_cyc *packet, + const struct ptdump_options *options, + const struct pt_config *config) +{ + uint64_t fcr; + int errcode; + + if (!buffer || !tracking || !options) + return diag("error tracking time", offset, -pte_internal); + + /* Initialize to zero in case of calibration errors. */ + fcr = 0ull; + + if (!options->no_tcal) { + errcode = pt_tcal_fcr(&fcr, &tracking->tcal); + if (errcode < 0) + diag("calibration error", offset, errcode); + + errcode = pt_tcal_update_cyc(&tracking->tcal, packet, config); + if (errcode < 0) + diag("error calibrating time", offset, errcode); + } + + errcode = pt_time_update_cyc(&tracking->time, packet, config, fcr); + if (errcode < 0) + diag("error updating time", offset, errcode); + else if (!fcr) + diag("error updating time: no calibration", offset, 0); + + /* There is no calibration update at this packet. */ + buffer->skip_tcal = 1; + + return track_time(buffer, tracking, offset, options); +} + +static uint64_t sext(uint64_t val, uint8_t sign) +{ + uint64_t signbit, mask; + + signbit = 1ull << (sign - 1); + mask = ~0ull << sign; + + return val & signbit ? 
val | mask : val & ~mask; +} + +static int print_ip_payload(struct ptdump_buffer *buffer, uint64_t offset, + const struct pt_packet_ip *packet) +{ + if (!buffer || !packet) + return diag("error printing payload", offset, -pte_internal); + + switch (packet->ipc) { + case pt_ipc_suppressed: + print_field(buffer->payload.standard, "%x: ????????????????", + pt_ipc_suppressed); + return 0; + + case pt_ipc_update_16: + print_field(buffer->payload.standard, "%x: ????????????%04" + PRIx64, pt_ipc_update_16, packet->ip); + return 0; + + case pt_ipc_update_32: + print_field(buffer->payload.standard, "%x: ????????%08" + PRIx64, pt_ipc_update_32, packet->ip); + return 0; + + case pt_ipc_update_48: + print_field(buffer->payload.standard, "%x: ????%012" + PRIx64, pt_ipc_update_48, packet->ip); + return 0; + + case pt_ipc_sext_48: + print_field(buffer->payload.standard, "%x: %016" PRIx64, + pt_ipc_sext_48, sext(packet->ip, 48)); + return 0; + + case pt_ipc_full: + print_field(buffer->payload.standard, "%x: %016" PRIx64, + pt_ipc_full, packet->ip); + return 0; + } + + print_field(buffer->payload.standard, "%x: %016" PRIx64, + packet->ipc, packet->ip); + return diag("bad ipc", offset, -pte_bad_packet); +} + +static int print_tnt_payload(struct ptdump_buffer *buffer, uint64_t offset, + const struct pt_packet_tnt *packet) +{ + uint64_t tnt; + uint8_t bits; + char *begin, *end; + + if (!buffer || !packet) + return diag("error printing payload", offset, -pte_internal); + + bits = packet->bit_size; + tnt = packet->payload; + + begin = buffer->payload.extended; + end = begin + bits; + + if (sizeof(buffer->payload.extended) < bits) { + diag("truncating tnt payload", offset, 0); + + end = begin + sizeof(buffer->payload.extended); + } + + for (; begin < end; ++begin, --bits) + *begin = tnt & (1ull << (bits - 1)) ? '!' 
: '.'; + + return 0; +} + +static const char *print_exec_mode(const struct pt_packet_mode_exec *packet, + uint64_t offset) +{ + enum pt_exec_mode mode; + + mode = pt_get_exec_mode(packet); + switch (mode) { + case ptem_64bit: + return "64-bit"; + + case ptem_32bit: + return "32-bit"; + + case ptem_16bit: + return "16-bit"; + + case ptem_unknown: + return "unknown"; + } + + diag("bad exec mode", offset, -pte_bad_packet); + return "invalid"; +} + +static int print_packet(struct ptdump_buffer *buffer, uint64_t offset, + const struct pt_packet *packet, + struct ptdump_tracking *tracking, + const struct ptdump_options *options, + const struct pt_config *config) +{ + if (!buffer || !packet || !tracking || !options) + return diag("error printing packet", offset, -pte_internal); + + switch (packet->type) { + case ppt_unknown: + print_field(buffer->opcode, ""); + return 0; + + case ppt_invalid: + print_field(buffer->opcode, ""); + return 0; + + case ppt_psb: + print_field(buffer->opcode, "psb"); + + tracking->in_header = 1; + return 0; + + case ppt_psbend: + print_field(buffer->opcode, "psbend"); + + tracking->in_header = 0; + return 0; + + case ppt_pad: + print_field(buffer->opcode, "pad"); + + if (options->no_pad) + buffer->skip = 1; + return 0; + + case ppt_ovf: + print_field(buffer->opcode, "ovf"); + return 0; + + case ppt_stop: + print_field(buffer->opcode, "stop"); + return 0; + + case ppt_fup: + print_field(buffer->opcode, "fup"); + print_ip_payload(buffer, offset, &packet->payload.ip); + + if (options->show_last_ip) + track_last_ip(buffer, &tracking->last_ip, offset, + &packet->payload.ip, options, config); + return 0; + + case ppt_tip: + print_field(buffer->opcode, "tip"); + print_ip_payload(buffer, offset, &packet->payload.ip); + + if (options->show_last_ip) + track_last_ip(buffer, &tracking->last_ip, offset, + &packet->payload.ip, options, config); + return 0; + + case ppt_tip_pge: + print_field(buffer->opcode, "tip.pge"); + print_ip_payload(buffer, offset, &packet->payload.ip); + + if (options->show_last_ip) + track_last_ip(buffer, &tracking->last_ip, offset, + &packet->payload.ip, options, config); + return 0; + + case ppt_tip_pgd: + print_field(buffer->opcode, "tip.pgd"); + print_ip_payload(buffer, offset, &packet->payload.ip); + + if (options->show_last_ip) + track_last_ip(buffer, &tracking->last_ip, offset, + &packet->payload.ip, options, config); + return 0; + + case ppt_pip: + print_field(buffer->opcode, "pip"); + print_field(buffer->payload.standard, "%" PRIx64 "%s", + packet->payload.pip.cr3, + packet->payload.pip.nr ? ", nr" : ""); + + print_field(buffer->tracking.id, "cr3"); + print_field(buffer->tracking.payload, "%016" PRIx64, + packet->payload.pip.cr3); + return 0; + + case ppt_vmcs: + print_field(buffer->opcode, "vmcs"); + print_field(buffer->payload.standard, "%" PRIx64, + packet->payload.vmcs.base); + + print_field(buffer->tracking.id, "vmcs"); + print_field(buffer->tracking.payload, "%016" PRIx64, + packet->payload.vmcs.base); + return 0; + + case ppt_tnt_8: + print_field(buffer->opcode, "tnt.8"); + return print_tnt_payload(buffer, offset, &packet->payload.tnt); + + case ppt_tnt_64: + print_field(buffer->opcode, "tnt.64"); + return print_tnt_payload(buffer, offset, &packet->payload.tnt); + + case ppt_mode: { + const struct pt_packet_mode *mode; + + mode = &packet->payload.mode; + switch (mode->leaf) { + case pt_mol_exec: { + const char *csd, *csl, *sep; + + csd = mode->bits.exec.csd ? "cs.d" : ""; + csl = mode->bits.exec.csl ? "cs.l" : ""; + + sep = csd[0] && csl[0] ? 
", " : ""; + + print_field(buffer->opcode, "mode.exec"); + print_field(buffer->payload.standard, "%s%s%s", + csd, sep, csl); + + if (options->show_exec_mode) { + const char *em; + + em = print_exec_mode(&mode->bits.exec, offset); + print_field(buffer->tracking.id, "em"); + print_field(buffer->tracking.payload, "%s", em); + } + } + return 0; + + case pt_mol_tsx: { + const char *intx, *abrt, *sep; + + intx = mode->bits.tsx.intx ? "intx" : ""; + abrt = mode->bits.tsx.abrt ? "abrt" : ""; + + sep = intx[0] && abrt[0] ? ", " : ""; + + print_field(buffer->opcode, "mode.tsx"); + print_field(buffer->payload.standard, "%s%s%s", + intx, sep, abrt); + } + return 0; + } + + print_field(buffer->opcode, "mode"); + print_field(buffer->payload.standard, "leaf: %x", mode->leaf); + + return diag("unknown mode leaf", offset, 0); + } + + case ppt_tsc: + print_field(buffer->opcode, "tsc"); + print_field(buffer->payload.standard, "%" PRIx64, + packet->payload.tsc.tsc); + + if (options->track_time) + track_tsc(buffer, tracking, offset, + &packet->payload.tsc, options, config); + + if (options->no_timing) + buffer->skip = 1; + + return 0; + + case ppt_cbr: + print_field(buffer->opcode, "cbr"); + print_field(buffer->payload.standard, "%x", + packet->payload.cbr.ratio); + + if (options->track_time) + track_cbr(buffer, tracking, offset, + &packet->payload.cbr, options, config); + + if (options->no_timing) + buffer->skip = 1; + + return 0; + + case ppt_tma: + print_field(buffer->opcode, "tma"); + print_field(buffer->payload.standard, "%x, %x", + packet->payload.tma.ctc, packet->payload.tma.fc); + + if (options->track_time) + track_tma(buffer, tracking, offset, + &packet->payload.tma, options, config); + + if (options->no_timing) + buffer->skip = 1; + + return 0; + + case ppt_mtc: + print_field(buffer->opcode, "mtc"); + print_field(buffer->payload.standard, "%x", + packet->payload.mtc.ctc); + + if (options->track_time) + track_mtc(buffer, tracking, offset, + &packet->payload.mtc, options, config); + + if (options->no_timing) + buffer->skip = 1; + + return 0; + + case ppt_cyc: + print_field(buffer->opcode, "cyc"); + print_field(buffer->payload.standard, "%" PRIx64, + packet->payload.cyc.value); + + if (options->track_time && !options->no_cyc) + track_cyc(buffer, tracking, offset, + &packet->payload.cyc, options, config); + + if (options->no_timing || options->no_cyc) + buffer->skip = 1; + + return 0; + + case ppt_mnt: + print_field(buffer->opcode, "mnt"); + print_field(buffer->payload.standard, "%" PRIx64, + packet->payload.mnt.payload); + return 0; + } + + return diag("unknown packet", offset, -pte_bad_opc); +} + +static int dump_one_packet(uint64_t offset, const struct pt_packet *packet, + struct ptdump_tracking *tracking, + const struct ptdump_options *options, + const struct pt_config *config) +{ + struct ptdump_buffer buffer; + int errcode; + + memset(&buffer, 0, sizeof(buffer)); + + print_field(buffer.offset, "%016" PRIx64, offset); + + if (options->show_raw_bytes) { + errcode = print_raw(&buffer, offset, packet, config); + if (errcode < 0) + return errcode; + } + + errcode = print_packet(&buffer, offset, packet, tracking, options, + config); + if (errcode < 0) + return errcode; + + return print_buffer(&buffer, offset, options); +} + +static int dump_packets(struct pt_packet_decoder *decoder, + struct ptdump_tracking *tracking, + const struct ptdump_options *options, + const struct pt_config *config) +{ + uint64_t offset; + int errcode; + + offset = 0ull; + for (;;) { + struct pt_packet packet; + + errcode = 
pt_pkt_get_offset(decoder, &offset); + if (errcode < 0) + return diag("error getting offset", offset, errcode); + + errcode = pt_pkt_next(decoder, &packet, sizeof(packet)); + if (errcode < 0) { + if (errcode == -pte_eos) + return 0; + + return diag("error decoding packet", offset, errcode); + } + + errcode = dump_one_packet(offset, &packet, tracking, options, + config); + if (errcode < 0) + return errcode; + } +} + +static int dump_sync(struct pt_packet_decoder *decoder, + struct ptdump_tracking *tracking, + const struct ptdump_options *options, + const struct pt_config *config) +{ + int errcode; + + if (!options) + return diag("setup error", 0ull, -pte_internal); + + if (options->no_sync) { + errcode = pt_pkt_sync_set(decoder, 0ull); + if (errcode < 0) + return diag("sync error", 0ull, errcode); + } else { + errcode = pt_pkt_sync_forward(decoder); + if (errcode < 0) { + if (errcode == -pte_eos) + return 0; + + return diag("sync error", 0ull, errcode); + } + } + + for (;;) { + errcode = dump_packets(decoder, tracking, options, config); + if (!errcode) + break; + + errcode = pt_pkt_sync_forward(decoder); + if (errcode < 0) { + if (errcode == -pte_eos) + return 0; + + return diag("sync error", 0ull, errcode); + } + + ptdump_tracking_reset(tracking); + } + + return errcode; +} + +static int dump(const struct pt_config *config, + const struct ptdump_options *options) +{ + struct pt_packet_decoder *decoder; + struct ptdump_tracking tracking; + int errcode; + + decoder = pt_pkt_alloc_decoder(config); + if (!decoder) + return diag("failed to allocate decoder", 0ull, 0); + + ptdump_tracking_init(&tracking); + + errcode = dump_sync(decoder, &tracking, options, config); + + ptdump_tracking_fini(&tracking); + pt_pkt_free_decoder(decoder); + return errcode; +} + +static int get_arg_uint64(uint64_t *value, const char *option, const char *arg, + const char *prog) +{ + char *rest; + + if (!value || !option || !prog) { + fprintf(stderr, "%s: internal error.\n", prog ? 
prog : "?"); + return 0; + } + + if (!arg || (arg[0] == '-' && arg[1] == '-')) { + fprintf(stderr, "%s: %s: missing argument.\n", prog, option); + return 0; + } + + errno = 0; + *value = strtoull(arg, &rest, 0); + if (errno || *rest) { + fprintf(stderr, "%s: %s: bad argument: %s.\n", prog, option, + arg); + return 0; + } + + return 1; +} + +static int get_arg_uint32(uint32_t *value, const char *option, const char *arg, + const char *prog) +{ + uint64_t val; + + if (!get_arg_uint64(&val, option, arg, prog)) + return 0; + + if (val > UINT32_MAX) { + fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, + arg); + return 0; + } + + *value = (uint32_t) val; + + return 1; +} + +static int get_arg_uint8(uint8_t *value, const char *option, const char *arg, + const char *prog) +{ + uint64_t val; + + if (!get_arg_uint64(&val, option, arg, prog)) + return 0; + + if (val > UINT8_MAX) { + fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, + arg); + return 0; + } + + *value = (uint8_t) val; + + return 1; +} + +int main(int argc, char *argv[]) +{ + struct ptdump_options options; + struct pt_config config; + int errcode, idx; + char *ptfile; + uint64_t pt_offset, pt_size; + + ptfile = NULL; + + memset(&options, 0, sizeof(options)); + options.show_offset = 1; + + memset(&config, 0, sizeof(config)); + pt_config_init(&config); + + for (idx = 1; idx < argc; ++idx) { + if (strncmp(argv[idx], "-", 1) != 0) { + ptfile = argv[idx]; + if (idx < (argc-1)) + return usage(argv[0]); + break; + } + + if (strcmp(argv[idx], "-h") == 0) + return help(argv[0]); + if (strcmp(argv[idx], "--help") == 0) + return help(argv[0]); + if (strcmp(argv[idx], "--version") == 0) + return version(argv[0]); + if (strcmp(argv[idx], "--no-sync") == 0) + options.no_sync = 1; + else if (strcmp(argv[idx], "--quiet") == 0) + options.quiet = 1; + else if (strcmp(argv[idx], "--no-pad") == 0) + options.no_pad = 1; + else if (strcmp(argv[idx], "--no-timing") == 0) + options.no_timing = 1; + else if (strcmp(argv[idx], "--no-cyc") == 0) + options.no_cyc = 1; + else if (strcmp(argv[idx], "--no-offset") == 0) + options.show_offset = 0; + else if (strcmp(argv[idx], "--raw") == 0) + options.show_raw_bytes = 1; + else if (strcmp(argv[idx], "--lastip") == 0) + options.show_last_ip = 1; + else if (strcmp(argv[idx], "--exec-mode") == 0) + options.show_exec_mode = 1; + else if (strcmp(argv[idx], "--time") == 0) { + if (options.show_tcal) { + fprintf(stderr, "%s: specify either --time " + "or --tcal.\n", argv[0]); + return 1; + } + + options.track_time = 1; + options.show_time = 1; + } else if (strcmp(argv[idx], "--time-delta") == 0) { + options.show_time_as_delta = 1; + } else if (strcmp(argv[idx], "--tcal") == 0) { + if (options.show_time) { + fprintf(stderr, "%s: specify either --time " + "or --tcal.\n", argv[0]); + return 1; + } + + options.track_time = 1; + options.show_tcal = 1; + } else if (strcmp(argv[idx], "--no-tcal") == 0) + options.no_tcal = 1; + else if (strcmp(argv[idx], "--no-wall-clock") == 0) + options.no_wall_clock = 1; + else if (strcmp(argv[idx], "--cpu") == 0) { + const char *arg; + + arg = argv[++idx]; + if (!arg) { + fprintf(stderr, + "%s: --cpu: missing argument.\n", + argv[0]); + return 1; + } + + if (strcmp(arg, "auto") == 0) { + errcode = pt_cpu_read(&config.cpu); + if (errcode < 0) { + fprintf(stderr, + "%s: error reading cpu: %s.\n", + argv[0], + pt_errstr(pt_errcode(errcode))); + return 1; + } + continue; + } + + if (strcmp(arg, "none") == 0) { + memset(&config.cpu, 0, sizeof(config.cpu)); + continue; + } + + 
errcode = pt_cpu_parse(&config.cpu, arg); + if (errcode < 0) { + fprintf(stderr, + "%s: cpu must be specified as f/m[/s]\n", + argv[0]); + return 1; + } + } else if (strcmp(argv[idx], "--mtc-freq") == 0) { + if (!get_arg_uint8(&config.mtc_freq, "--mtc-freq", + argv[++idx], argv[0])) + return 1; + } else if (strcmp(argv[idx], "--nom-freq") == 0) { + if (!get_arg_uint8(&config.nom_freq, "--nom-freq", + argv[++idx], argv[0])) + return 1; + } else if (strcmp(argv[idx], "--cpuid-0x15.eax") == 0) { + if (!get_arg_uint32(&config.cpuid_0x15_eax, + "--cpuid-0x15.eax", argv[++idx], + argv[0])) + return 1; + } else if (strcmp(argv[idx], "--cpuid-0x15.ebx") == 0) { + if (!get_arg_uint32(&config.cpuid_0x15_ebx, + "--cpuid-0x15.ebx", argv[++idx], + argv[0])) + return 1; + } else + return unknown_option_error(argv[idx], argv[0]); + } + + if (!ptfile) + return no_file_error(argv[0]); + + errcode = preprocess_filename(ptfile, &pt_offset, &pt_size); + if (errcode < 0) { + fprintf(stderr, "%s: bad file %s: %s.\n", argv[0], ptfile, + pt_errstr(pt_errcode(errcode))); + return 1; + } + + errcode = pt_cpu_errata(&config.errata, &config.cpu); + if (errcode < 0) + diag("failed to determine errata", 0ull, errcode); + + errcode = load_pt(&config, ptfile, pt_offset, pt_size, argv[0]); + if (errcode < 0) + return 1; + + errcode = dump(&config, &options); + + free(config.begin); + + return -errcode; +} diff --git a/pttc/CMakeLists.txt b/pttc/CMakeLists.txt new file mode 100644 index 0000000..3a55853 --- /dev/null +++ b/pttc/CMakeLists.txt @@ -0,0 +1,64 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
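For reference, the offset/range suffix handling implemented by parse_range() and preprocess_filename() in ptdump.c above can be illustrated with a small standalone sketch. This is editorial illustration only, not part of the patch; the helper name split_arg and the sample argument are made up, and the snippet simply mirrors the strrchr()/strtoull() logic shown above.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "file.pt:0x100-0x1ff" into name, offset and size - illustration only. */
static void split_arg(char *arg)
{
	char *sep, *rest;
	uint64_t begin, end;

	sep = strrchr(arg, ':');	/* search from the end; the name may contain ':' */
	if (!sep) {
		printf("%s (whole file)\n", arg);
		return;
	}

	begin = strtoull(sep + 1, &rest, 0);
	end = (*rest == '-') ? strtoull(rest + 1, NULL, 0) : 0ull;

	*sep = 0;
	printf("%s, offset 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
	       arg, begin, end ? end - begin : 0ull);
}

int main(void)
{
	char arg[] = "trace.pt:0x100-0x1ff";

	split_arg(arg);	/* prints: trace.pt, offset 0x100, size 0xff */
	return 0;
}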
+ +include_directories( + include + ../libipt/internal/include +) + +set(PTTC_FILES + src/errcode.c + src/file.c + src/parse.c + src/pttc.c + src/util.c + src/yasm.c + ../libipt/src/pt_cpu.c +) + +if (CMAKE_HOST_UNIX) + set(PTTC_FILES + ${PTTC_FILES} + src/posix/util.c + ../libipt/src/posix/pt_cpuid.c + ) +endif (CMAKE_HOST_UNIX) + +if (CMAKE_HOST_WIN32) + set(PTTC_FILES + ${PTTC_FILES} + src/windows/util.c + ../libipt/src/windows/pt_cpuid.c + ) +endif (CMAKE_HOST_WIN32) + +add_executable(pttc + ${PTTC_FILES} + + src/main.c +) + +target_link_libraries(pttc libipt) diff --git a/pttc/include/errcode.h b/pttc/include/errcode.h new file mode 100644 index 0000000..4bcdc81 --- /dev/null +++ b/pttc/include/errcode.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ERRCODE_H +#define ERRCODE_H + +/* Error codes. */ +enum errcode { + success, + + err_file_open, + err_file_read, + err_file_size, + err_file_write, + err_out_of_range, + + err_label_addr, + err_no_org_directive, + err_no_directive, + err_no_label, + err_label_name, + err_label_not_unique, + + err_section_no_name, + err_section_attribute_no_value, + err_section_unknown_attribute, + + err_missing_closepar, + err_missing_openpar, + + err_parse, + err_parse_int, + err_parse_int_too_big, + err_parse_ipc, + err_parse_ip_missing, + err_parse_no_args, + err_parse_trailing_tokens, + err_parse_unknown_char, + err_parse_unknown_directive, + err_parse_missing_directive, + + err_pt_lib, + + err_run, + + err_other, + + err_no_mem, + + /* Used for all invalid function arguments. */ + err_internal, + + /* Special return value used in p_process to signal that the + * rest of the file should go into a .exp file. + */ + stop_process, + + /* Maximum error code. + * + * This must always be the last element in the enum. + * It must not be used as error code. + */ + err_max +}; + +/* Map error codes to descriptions. 
+ * + * Note, all error codes, that are returned by functions, are negative, + * so usually error codes must be negated when accessing this array. + */ +extern const char *errstr[]; + +#endif /* ERRCODE_H */ diff --git a/pttc/include/file.h b/pttc/include/file.h new file mode 100644 index 0000000..c1a651d --- /dev/null +++ b/pttc/include/file.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FILE_H +#define FILE_H + +#include + +/* Provides linewise access to a string. + * Access to the lines is guarded by the text_line function. + */ +struct text { + /* Number of lines. */ + size_t n; + + /* Each line[0] to line[n-1] points to the start of the + * corresponding line. + */ + char **line; +}; + +/* Allocates new text. + * + * Note, if s is NULL or the empty string the text has zero lines. + * + * Returns a non-NULL text object on success; NULL otherwise. + */ +extern struct text *text_alloc(const char *s); + +/* Deallocates @t. + * If @t is the NULL pointer, nothing happens. + */ +extern void text_free(struct text *t); + +/* Initializes @t with @s. All "\n" or "\r\n" lineendings, will be + * replaced with '\0'. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @t is the NULL pointer. + */ +extern int text_parse(struct text *t, const char *s); + +/* Copies at most @destlen characters of line @n from text @t to @dest. + * The line counts start with 0. + * If @dest is the NULL pointer just the line number is checked. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @t is the NULL pointer or if @dest is the + * NULL pointer, but @destlen is non-zero. + * Returns -err_out_of_range if @n is not in the range. + * + * Note, the string is always null byte terminated on success. + */ +extern int text_line(const struct text *t, char *dest, size_t destlen, + size_t n); + +/* Provides access to lines of files. Access to all files is cached + * after the first request. 
+ * + * By convention, the first file_list element in the list is the head + * and stores no file information. + */ +struct file_list { + /* Name of the file. */ + char *filename; + + /* The content of the file. */ + struct text *text; + + /* Points to the next file list entry. It's NULL if the + * current file_list is the last entry in the list. + */ + struct file_list *next; +}; + +/* Allocates a new file list. + * + * Returns a non-NULL file list object on succes; NULL otherwise. + */ +extern struct file_list *fl_alloc(void); + +/* Deallocates @fl. + * If @fl is the NULL pointer, nothing happens. + */ +extern void fl_free(struct file_list *fl); + +/* Looks up line @n in a file @filename. The line content is stored in + * @dest, which should have a capacity of @destlen. + * If @dest is the NULL pointer just the line number is checked. + * See function text_line how the line is copied to @dest. + * The file @filename is loaded implicitly. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @fl or @filename is the NULL pointer or if + * @dest is the NULL pointer, but @destlen is non-zero. + * Returns -err_out_of_range if n is not a valid line number. + * Returns -err_file_stat if @filename could not be found. + * Returns -err_file_open if @filename could not be opened. + * Returns -err_file_read if the content of @filename could not be fully + * read. + */ +extern int fl_getline(struct file_list *fl, char *dest, size_t destlen, + const char *filename, size_t n); + +/* Looks up the text for @filename and stores its contents in @t. + * The file @filename is loaded implicitly. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @fl or @t or @filename is the NULL pointer. + * Returns -err_file_stat if @filename could not be found. + * Returns -err_file_open if @filename could not be opened. + * Returns -err_file_read if the content of @filename could not be fully + * read. + */ +extern int fl_gettext(struct file_list *fl, const struct text **t, + const char *filename); + +#endif /* FILE_H */ diff --git a/pttc/include/parse.h b/pttc/include/parse.h new file mode 100644 index 0000000..0870925 --- /dev/null +++ b/pttc/include/parse.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
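As a usage illustration for the file.h interface above, the following sketch fetches a single source line through the cached file list. It is editorial only, not part of the patch; the print_line wrapper and the fixed 256-byte buffer are assumptions.

#include "file.h"

#include <stdio.h>

/* Fetch line 3 of @filename through the cached file list and print it. */
static int print_line(struct file_list *fl, const char *filename)
{
	char line[256];
	int errcode;

	/* Line counts start with 0, so line 3 is index 2. */
	errcode = fl_getline(fl, line, sizeof(line), filename, 2);
	if (errcode < 0)
		return errcode;

	printf("%s:3: %s\n", filename, line);
	return 0;
}

Per the comments above, the file is loaded implicitly on the first request and cached in the list afterwards, so repeated lookups against the same file do not re-read it.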
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PARSE_H +#define PARSE_H + +#include "yasm.h" + +#include "intel-pt.h" + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> + +/* Represents the parser. */ +struct parser { + /* File pointer to the trace output file. */ + FILE *ptfile; + + /* Filename of the trace output file. The filename is + * determined from the .asm file given during p_alloc. + */ + char *ptfilename; + + /* The yasm structure, initialized with pttfile in p_alloc. */ + struct yasm *y; + + /* Current pt directive. */ + struct pt_directive *pd; + + /* The encoder configuration, passed during p_alloc. */ + const struct pt_config *conf; + + /* Labels for @pt directives. */ + struct label *pt_labels; + + /* Number of bytes written to pt file. */ + int pt_bytes_written; +}; + +/* Instantiates a parser and starts parsing of @pttfile and writes PT + * stream using @conf. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int parse(const char *pttfile, const struct pt_config *conf); + +/* Parses an empty payload. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_parse_trailing_tokens if @payload has non-whitespace + * characters. + */ +extern int parse_empty(char *payload); + +/* Parses tnt @payload. Taken branches are expressed with 't' and Not-Taken + * branches with 'n'. The t's and n's can be separated with spaces, periods or + * directly concatenated. + * + * On success the TNT bitfield will be stored in the location of @tnt; the + * number of T's and N's is stored in the location of @size. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @payload or @tnt or @size is the NULL + * pointer. + * Returns -err_parse_unknown_char if there is an unrecognized character + * in the payload. + */ +extern int parse_tnt(uint64_t *tnt, uint8_t *size, char *payload); + +/* Parses an address and an ipc from @payload and stores it in the + * location of @ip and @ipc respectively. The ipc is separated from the + * address with space or comma. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @p or @ip or @ipc is the NULL pointer. + * Returns -err_parse_int if ip or ipc in the @payload could not be + * parsed as integer. + * Returns -err_parse_ipc if the ipc argument is missing or malformed. + * Returns -err_parse_trailing_tokens if the @payload contains more than + * 2 arguments. + */ +extern int parse_ip(struct parser *p, uint64_t *ip, + enum pt_ip_compression *ipc, char *payload); + +/* Parses a uint64_t value from @payload and stores it in the memory + * location where @x points to. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @x is the NULL pointer. + * Returns -err_parse_no_args if @payload contains no arguments. + * Returns -err_parse_int if @payload cannot be parsed as integer.
+ */ +extern int parse_uint64(uint64_t *x, char *payload); + +/* Parses a uint8_t value from @payload and stores it in the memory + * location where @x points to. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @x is the NULL pointer. + * Returns -err_parse_no_args if @payload contains no arguments. + * Returns -err_parse_int if @payload cannot be parsed as integer. + * Returns -err_parse_int_too_big if the integer parsed from @payload + * cannot be represented in uint8_t. + */ +extern int parse_uint8(uint8_t *x, char *payload); + +/* Parses a uint16_t value from @payload and stores it in the memory + * location where @x points to. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @x is the NULL pointer. + * Returns -err_parse_no_args if @payload contains no arguments. + * Returns -err_parse_int if @payload cannot be parsed as integer. + * Returns -err_parse_int_too_big if the integer parsed from @payload + * cannot be represented in uint16_t. + */ +extern int parse_uint16(uint16_t *x, char *payload); + +/* Parses a uint32_t value from @payload and stores it in the memory + * location where @x points to. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @x is the NULL pointer. + * Returns -err_parse_no_args if @payload contains no arguments. + * Returns -err_parse_int if @payload cannot be parsed as integer. + * Returns -err_parse_int_too_big if the integer parsed from @payload + * cannot be represented in uint32_t. + */ +extern int parse_uint32(uint32_t *x, char *payload); + +/* Parses the comma-separated ctc and fc arguments of a tma packet. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @ctc or @fc is the NULL pointer. + * Returns -err_parse_int if ctc or fc in the @payload could not be + * parsed as integer. + * Returns -err_parse_trailing_tokens if the @payload contains more than + * 2 arguments. + */ +extern int parse_tma(uint16_t *ctc, uint16_t *fc, char *payload); + +#endif /* PARSE_H */ diff --git a/pttc/include/pttc.h b/pttc/include/pttc.h new file mode 100644 index 0000000..7f38151 --- /dev/null +++ b/pttc/include/pttc.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
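The payload parsers declared in parse.h above can be exercised directly. A minimal sketch follows; it is editorial only, not part of the patch, and the payload strings are made-up test input.

#include "parse.h"

#include <stdio.h>

int main(void)
{
	char tnt_payload[] = "t.n.t";
	char int_payload[] = "0x42";
	uint64_t tnt, val;
	uint8_t size;
	int errcode;

	/* Three branches: taken, not-taken, taken. */
	errcode = parse_tnt(&tnt, &size, tnt_payload);
	if (errcode < 0)
		return 1;
	printf("tnt bits: %u\n", (unsigned int) size);	/* 3 */

	errcode = parse_uint64(&val, int_payload);
	if (errcode < 0)
		return 1;
	printf("value: %llx\n", (unsigned long long) val);	/* 42 */

	return 0;
}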
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PTTC_H +#define PTTC_H + +#include "intel-pt.h" + +/* Options that are passed to pttc main. */ +struct pttc_options { + /* The cpu that should be used for encoding. */ + struct pt_cpu cpu; + + /* The input .ptt file. */ + const char *pttfile; +}; + +/* Starts the parsing process with @asmfile. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int pttc_main(const struct pttc_options *options); + +#endif /* PTTC_H */ diff --git a/pttc/include/util.h b/pttc/include/util.h new file mode 100644 index 0000000..234b836 --- /dev/null +++ b/pttc/include/util.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UTIL_H +#define UTIL_H + +#include <stdint.h> + +/* Duplicates @s and returns a pointer to it. + * + * The returned pointer must be freed by the caller. + * + * Returns the pointer to the duplicate on success; otherwise NULL is + * returned. + */ +extern char *duplicate_str(const char *s); + +/* Converts the string @str into an unsigned x-bit value @val using base @base. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if either @str or @val is NULL. + * Returns -err_parse_int if there was a general parsing error. + * Returns -err_parse_int_too_big if parsed value wouldn't fit into x bit.
+ */ +extern int str_to_uint64(const char *str, uint64_t *val, int base); +extern int str_to_uint32(const char *str, uint32_t *val, int base); +extern int str_to_uint16(const char *str, uint16_t *val, int base); +extern int str_to_uint8(const char *str, uint8_t *val, int base); + +/* Executes @file and passes @argv as command-line arguments. + * The last element in @argv must be NULL. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int run(const char *file, char *const argv[]); + +/* Prints condstr, together with file and line, to stderr if cond is not 0. + * Please do not use this function directly, use the bug_on convenience + * macro. + * + * Returns cond. + */ +extern int do_bug_on(int cond, const char *condstr, const char *file, int line); + +/* Convenience macro that wraps cond as condstr and current file and line + * for do_bug_on. + * + * Returns cond. + */ +#define bug_on(cond) do_bug_on(cond, #cond, __FILE__, __LINE__) + +/* Represents a label list with the corresponding address. + * + * By convention, the first label in the list is the head and stores + * no label information. + */ +struct label { + /* Labelname. */ + char *name; + + /* Address associated with the label. */ + uint64_t addr; + + /* The next label in the list. */ + struct label *next; +}; + +/* Allocates a new label list. + * + * Returns a non-NULL label list object on success; NULL otherwise. + */ +extern struct label *l_alloc(void); + +/* Deallocates and clears all elements in the list denoted by @l. + * If @l is the NULL pointer, nothing happens. + */ +extern void l_free(struct label *l); + +/* Appends a label to the last element in @l with @name and @addr. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int l_append(struct label *l, const char *name, uint64_t addr); + +/* Looks up the label @name in @l and stores the address where @addr points to. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @l or @addr or @name is the NULL pointer. + * Returns -err_no_label if a label with @name does not exist in @l. + */ +extern int l_lookup(const struct label *l, uint64_t *addr, const char *name); + +/* Find the label @name in @l and return a pointer to it. + * + * Returns a pointer to the found label on success; NULL otherwise. + */ +extern struct label *l_find(struct label *l, const char *name); + +#endif /* UTIL_H */ diff --git a/pttc/include/yasm.h b/pttc/include/yasm.h new file mode 100644 index 0000000..badd0f7 --- /dev/null +++ b/pttc/include/yasm.h @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
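The label list in util.h above is a plain linked list with a placeholder head node. A usage sketch follows; it is editorial only, not part of the patch, and the label name and address are made up.

#include "util.h"

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	struct label *l;
	uint64_t addr;
	int errcode;

	l = l_alloc();
	if (bug_on(!l))
		return 1;

	/* Record a label, then resolve it by name. */
	errcode = l_append(l, "lbl_start", 0x100000ull);
	if (!errcode) {
		errcode = l_lookup(l, &addr, "lbl_start");
		if (!errcode)
			printf("lbl_start = 0x%" PRIx64 "\n", addr);
	}

	l_free(l);
	return errcode < 0 ? 1 : 0;
}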
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef YASM_H +#define YASM_H + +#include "file.h" +#include "util.h" + +#include + +/* Parses all labels in @t and appends them to @l. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_section if @t contains a "[section]" yasm directive. + * Sections are currently not supported. + * Returns -err_label_addr if the address for a label could not be + * determined. + */ +extern int parse_yasm_labels(struct label *l, const struct text *t); + +/* Modifies @s, so it can be used as a label, if @s actually looks like + * a label. + * + * Returns true if @s looks like a label; false otherwise. + * Returns -err_internal if @l or @name is the NULL pointer. + */ +extern int make_label(char *s); + +/* Represents the state of the pt directive parser. The parser uses the + * canonical yasm lst file syntax to follow all asm source files that + * were used during a yasm run. The lst file stores information about + * these files in terms of line numbers and line increments. With this + * information the contents of the lst file can be correlated to the + * actual source files. + */ +struct state { + /* Current line number. */ + int n; + + /* Current line increment for this file. */ + int inc; + + /* Current filename. */ + char *filename; + + /* Pointer to the current line. */ + char *line; +}; + +/* Allocates new state. + * + * Returns a non-NULL state object on success; NULL otherwise. + */ +extern struct state *st_alloc(void); + +/* Deallocates and clears all fields of @st. + * If @st is the NULL pointer, nothing happens. + */ +extern void st_free(struct state *st); + +/* Prints @s to stderr enriched with @st's file and line information. + * + * Returns @errcode on success. + * Returns -err_internal if @st is the NULL pointer or @errcode is + * not negative. + */ +extern int st_print_err(const struct state *st, const char *s, int errcode); + +/* Represents a pt directive with name and payload. */ +struct pt_directive { + /* Name of the directive. */ + char *name; + + /* Length of name. */ + size_t nlen; + + /* Everything between the '(' and ')' in the directive. */ + char *payload; + + /* Length of payoad. */ + size_t plen; +}; + +/* Allocates a new pt directive that can hold a directive name and + * payload of no more than @n characters. + * + * Returns a non-NULL pt directive object on success; NULL otherwise. + */ +extern struct pt_directive *pd_alloc(size_t n); + +/* Deallocates and clears all fields of @pd. + * If @pd is the NULL pointer, nothing happens. + */ +extern void pd_free(struct pt_directive *pd); + +/* Copies @name and @payload to the corresponding fields in @pd. + * + * Returns 0 on success; a negative enum errcode otherwise. 
+ * Returns -err_internal if @pd or @name or @payload is the NULL + * pointer. + */ +extern int pd_set(struct pt_directive *pd, const char *name, + const char *payload); + +/* Parses a pt directive from @st and stores it in @pd. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @pd or @st is the NULL pointer. + */ +extern int pd_parse(struct pt_directive *pd, struct state *st); + +/* Represents a yasm assembled file. */ +struct yasm { + /* Filename of the .asm file. */ + char *pttfile; + + /* Filename of the .lst file. It is the concatenation of + * fileroot and ".lst". + */ + char *lstfile; + + /* Filename of the .bin file. It is the concatenation of + * fileroot and ".bin". + */ + char *binfile; + + /* Fileroot is the pttfile filename, but with a trailing file + * extension removed. It is used to create files based on the + * pttfile and is also used to create the .pt and .exp files + * during the parsing step. + */ + char *fileroot; + + /* The list of files that are encountered while parsing the + * lstfile. + */ + struct file_list *fl; + + /* State of the current assembly file, while parsing the + * lstfile. + */ + struct state *st_asm; + + /* Current line number in the lstfile. */ + int lst_curr_line; + + /* The list of labels found in the lstfile. */ + struct label *l; +}; + +/* Allocates a new yasm container with @pttfile. + * + * Returns a non-NULL yasm container object on success; NULL otherwise. + */ +extern struct yasm *yasm_alloc(const char *pttfile); + +/* Deallocates and clears all field of @y. + * If @y is the NULL pointer, nothing happens. + */ +extern void yasm_free(struct yasm *y); + +/* Assembles the pttfile with yasm and parses all labels. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int yasm_parse(struct yasm *y); + +/* Looks up @labelname and stores its address in @addr if found. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int yasm_lookup_label(const struct yasm *y, uint64_t *addr, + const char *labelname); + +/* Looks up the special section label "section_@name_@attribute" and stores + * its value in @value if found. + * + * Valid attributes are: + * + * - start the section's start address in the binary file + * - vstart the section's virtual load address + * - length the section's size in bytes + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +extern int yasm_lookup_section_label(const struct yasm *y, const char *name, + const char *attribute, uint64_t *value); + +/* Stores the next pt directive in @pd. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @y or @pd is the NULL pointer. + * Returns -err_no_directive if there is no pt directive left. + */ +extern int yasm_next_pt_directive(struct yasm *y, struct pt_directive *pd); + +/* Calls pd_parse for the current file and line. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_no_directive if the current source line contains no PT + * directive. + */ +extern int yasm_pd_parse(struct yasm *y, struct pt_directive *pd); + +/* Stores the next line in the asm file into @dest. The memory behind + * @dest must be large enough to store @destlen bytes. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @y is the NULL pointer or @dest is NULL, but + * @destlen is non-zero. 
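+ *
+ * Illustrative read loop (handle_line() stands for arbitrary caller
+ * code; the loop stops at the first negative return code):
+ *
+ *	char line[1024];
+ *
+ *	while (yasm_next_line(y, line, sizeof(line)) == 0)
+ *		handle_line(line);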
+ */ +extern int yasm_next_line(struct yasm *y, char *dest, size_t destlen); + +/* Prints the error message @s together with errstr[@errcode]. File and + * line information are printed regarding the current state of @y. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @errcode is not negative. + */ +extern int yasm_print_err(const struct yasm *y, const char *s, int errcode); + +#endif /* YASM_H */ diff --git a/pttc/src/errcode.c b/pttc/src/errcode.c new file mode 100644 index 0000000..37b698b --- /dev/null +++ b/pttc/src/errcode.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "errcode.h" + +const char *errstr[] = { + "success", + + "cannot open file", + "cannot read file", + "cannot get file size", + "cannot write file", + "out of range", + + "label has no address", + "yasm directive 'org' is required", + "no pt directive", + "no such label", + "label name is too long", + "label name is not unique", + + "failed to find section name", + "failed to find value for section attribute", + "unknown section attribute", + + "missing ')'", + "missing '('", + + "parse error", + "integer cannot be parsed", + "integer too big", + "ipc missing or has invalid value", + "ip missing", + "no arguments", + "trailing tokens", + "unknown character", + "unknown directive", + "missing directive", + + "pt library error", + + "run failed", + + "unspecified error", + + "out of memory", + + "internal error", + + "processing stopped", + + "max error code", +}; diff --git a/pttc/src/file.c b/pttc/src/file.c new file mode 100644 index 0000000..e2226e2 --- /dev/null +++ b/pttc/src/file.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "errcode.h" +#include "file.h" +#include "util.h" + +#include +#include +#include +#include + +struct text *text_alloc(const char *s) +{ + size_t n, i; + char **line; + struct text *t; + + t = calloc(1, sizeof(struct text)); + if (!t) + return NULL; + + /* If s is NULL or empty, there is nothing to do. */ + if (!s || *s == '\0') + return t; + + /* beginning of s is the first line. */ + t->n = 1; + t->line = calloc(1, sizeof(*t->line)); + if (!t->line) + goto error; + + t->line[0] = duplicate_str(s); + if (!t->line[0]) + goto error; + + /* iterate through all chars and make \r?\n to \0. */ + n = strlen(t->line[0]); + for (i = 0; i < n; i++) { + if (t->line[0][i] == '\r') { + if (i+1 >= n) { + /* the file ends with \r. */ + t->line[0][i] = '\0'; + break; + } + /* terminate the line string if it's a line end. */ + if (t->line[0][i+1] == '\n') + t->line[0][i] = '\0'; + + } else if (t->line[0][i] == '\n') { + /* set newline character always to \0. */ + t->line[0][i] = '\0'; + if (i+1 >= n) { + /* the file ends with \n. */ + break; + } + /* increase line pointer buffer. */ + line = realloc(t->line, (t->n+1) * sizeof(*t->line)); + if (!line) + goto error; + t->line = line; + /* point to the next character after the + * newline and increment the number of lines. + */ + t->line[t->n++] = &(t->line[0][i+1]); + } + } + + return t; + +error: + text_free(t); + return NULL; +} + +void text_free(struct text *t) +{ + if (!t) + return; + + if (t->line) + free(t->line[0]); + free(t->line); + free(t); +} + +int text_line(const struct text *t, char *dest, size_t destlen, size_t n) +{ + if (bug_on(!t)) + return -err_internal; + + if (bug_on(!dest && destlen)) + return -err_internal; + + if (n >= t->n) + return -err_out_of_range; + + if (!dest) + return 0; + + if (!destlen) + return -err_internal; + + strncpy(dest, t->line[n], destlen); + + /* Make sure the string is terminated. */ + dest[destlen-1] = '\0'; + return 0; +} + +struct file_list *fl_alloc(void) +{ + return calloc(1, sizeof(struct file_list)); +} + +void fl_free(struct file_list *fl) +{ + if (!fl) + return; + + fl_free(fl->next); + text_free(fl->text); + free(fl->filename); + free(fl); +} + +/* Appends the @filename to @fl and stores a pointer to the internal + * text structure in @t. 
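+ *
+ * Illustrative use ("foo.lst" stands for any readable text file):
+ *
+ *	struct text *t;
+ *	int errcode = fl_append(fl, &t, "foo.lst");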
+ * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @fl or @t is the NULL pointer. + * Returns -err_file_stat if @filename could not be found. + * Returns -err_file_open if @filename could not be opened. + * Returns -err_file_read if the content of @filename could not be fully + * read. + */ +int fl_append(struct file_list *fl, struct text **t, const char *filename) +{ + int errcode; + FILE *f; + char *s; + long pos; + size_t fsize; + size_t read; + + if (bug_on(!fl)) + return -err_internal; + + if (bug_on(!t)) + return -err_internal; + + if (bug_on(!filename)) + return -err_internal; + + s = NULL; + *t = NULL; + + while (fl->next) + fl = fl->next; + + fl->next = fl_alloc(); + if (!fl->next) { + errcode = -err_no_mem; + goto error; + } + + fl->next->filename = duplicate_str(filename); + if (!fl->next->filename) { + errcode = -err_no_mem; + goto error; + } + + errno = 0; + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "open %s failed: %s\n", + filename, strerror(errno)); + errcode = -err_file_open; + goto error; + } + + errcode = fseek(f, 0, SEEK_END); + if (errcode) { + fprintf(stderr, "%s: failed to seek end: %s\n", + filename, strerror(errno)); + errcode = -err_file_size; + goto error_file; + } + + pos = ftell(f); + if (pos < 0) { + fprintf(stderr, "%s: failed to determine file size: %s\n", + filename, strerror(errno)); + errcode = -err_file_size; + goto error_file; + } + fsize = (size_t) pos; + + errcode = fseek(f, 0, SEEK_SET); + if (errcode) { + fprintf(stderr, "%s: failed to seek begin: %s\n", + filename, strerror(errno)); + errcode = -err_file_size; + goto error_file; + } + + s = calloc(fsize+1, 1); /* size + 1: space for last null byte. */ + if (!s) { + errcode = -err_no_mem; + goto error_file; + } + + read = fread(s, 1, fsize, f); + fclose(f); + if (read != fsize) { + fprintf(stderr, "read %s failed\n", filename); + errcode = -err_file_read; + goto error; + } + + *t = text_alloc(s); + if (!*t) { + errcode = -err_no_mem; + goto error; + } + + free(s); + fl->next->text = *t; + + return 0; + +error_file: + fclose(f); +error: + /* filename is closed after reading before handling error. 
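+	 * The error label therefore only releases memory and list
+	 * nodes; there is no open file handle left to close here.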
*/ + fl_free(fl->next); + fl->next = NULL; + free(s); + text_free(*t); + *t = NULL; + return errcode; +} + +int fl_getline(struct file_list *fl, char *dest, size_t destlen, + const char *filename, size_t n) +{ + int errcode; + const struct text *t; + + if (bug_on(!fl)) + return -err_internal; + + errcode = fl_gettext(fl, &t, filename); + if (errcode < 0) + return errcode; + + return text_line(t, dest, destlen, n); +} + +int fl_gettext(struct file_list *fl, const struct text **t, + const char *filename) +{ + struct text *tmp; + int errcode; + + if (bug_on(!fl)) + return -err_internal; + + if (bug_on(!t)) + return -err_internal; + + if (bug_on(!filename)) + return -err_internal; + + while (fl->next) { + fl = fl->next; + if (strcmp(fl->filename, filename) == 0) { + *t = fl->text; + return 0; + } + } + errcode = fl_append(fl, &tmp, filename); + if (errcode < 0) + return errcode; + + *t = tmp; + return 0; +} diff --git a/pttc/src/main.c b/pttc/src/main.c new file mode 100644 index 0000000..2126ad3 --- /dev/null +++ b/pttc/src/main.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pttc.h" + +#include "pt_cpu.h" + +#include +#include +#include + +/* Prints this tools version number and libipt version number on stdout. */ +static void version(const char *prog) +{ + struct pt_version v; + + v = pt_library_version(); + printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n", + prog, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD, + PT_VERSION_EXT, v.major, v.minor, v.build, v.ext); +} + +/* Prints usage information to stdout. 
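+ *
+ * Illustrative invocations of the tool ("foo.ptt" names an arbitrary
+ * input file):
+ *
+ *	pttc foo.ptt
+ *	pttc --cpu auto foo.ptt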
*/ +static void help(const char *prog) +{ + printf("usage: %s [] \n\n" + "options:\n" + " --help|-h this text.\n" + " --version display version information and exit.\n" + " --cpu none|auto|f/m[/s] set cpu to the given value and encode according to:\n" + " none spec (default)\n" + " auto current cpu\n" + " f/m[/s] family/model[/stepping]\n" + " the annotated yasm input file.\n", + prog); +} + +int main(int argc, char *argv[]) +{ + struct pttc_options options; + const char *prog; + int errcode, i; + + prog = argv[0]; + memset(&options, 0, sizeof(options)); + + for (i = 1; i < argc;) { + const char *arg; + + arg = argv[i++]; + + if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) { + help(prog); + return 0; + } + if (strcmp(arg, "--version") == 0) { + version(prog); + return 0; + } + if (strcmp(arg, "--cpu") == 0) { + arg = argv[i++]; + + if (strcmp(arg, "auto") == 0) { + errcode = pt_cpu_read(&options.cpu); + if (errcode < 0) { + fprintf(stderr, + "%s: error reading cpu: %s.\n", + prog, + pt_errstr(pt_errcode(errcode))); + return 1; + } + continue; + } + + if (strcmp(arg, "none") == 0) { + memset(&options.cpu, 0, sizeof(options.cpu)); + continue; + } + + errcode = pt_cpu_parse(&options.cpu, arg); + if (errcode < 0) { + fprintf(stderr, + "%s: cpu must be specified as f/m[/s].\n", + prog); + return 1; + } + continue; + } + + if (arg[0] == '-') { + fprintf(stderr, "%s: unrecognized option '%s'.\n", + prog, arg); + return 1; + } + + if (options.pttfile) { + fprintf(stderr, + "%s: only one pttfile can be specified.\n", + prog); + return 1; + } + options.pttfile = arg; + } + + if (!options.pttfile) { + fprintf(stderr, "%s: no pttfile specified.\n", prog); + fprintf(stderr, "Try '%s -h' for more information.\n", prog); + return 1; + } + + return pttc_main(&options); +} diff --git a/pttc/src/parse.c b/pttc/src/parse.c new file mode 100644 index 0000000..5c4fdb5 --- /dev/null +++ b/pttc/src/parse.c @@ -0,0 +1,1102 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "errcode.h" +#include "parse.h" +#include "util.h" + +#include +#include +#include +#include + +const char *pt_suffix = ".pt"; +const char *exp_suffix = ".exp"; + +enum { + pd_len = 1024 +}; + +/* Deallocates the memory used by @p, closes all files, clears and + * zeroes the fields. + */ +static void p_free(struct parser *p) +{ + if (!p) + return; + + yasm_free(p->y); + pd_free(p->pd); + l_free(p->pt_labels); + free(p->ptfilename); + + free(p); +} + +/* Initializes @p with @pttfile and @conf. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @p is the NULL pointer. + */ +static struct parser *p_alloc(const char *pttfile, const struct pt_config *conf) +{ + size_t n; + struct parser *p; + + if (!conf) + return NULL; + + if (!pttfile) + return NULL; + + p = calloc(1, sizeof(*p)); + if (!p) + return NULL; + + p->y = yasm_alloc(pttfile); + if (!p->y) + goto error; + + n = strlen(p->y->fileroot) + 1; + + p->ptfilename = malloc(n+strlen(pt_suffix)); + if (!p->ptfilename) + goto error; + + strcpy(p->ptfilename, p->y->fileroot); + strcat(p->ptfilename, pt_suffix); + + p->pd = pd_alloc(pd_len); + if (!p->pd) + goto error; + + p->pt_labels = l_alloc(); + if (!p->pt_labels) + goto error; + + p->conf = conf; + + return p; + +error: + p_free(p); + return NULL; +} + +/* Generates an .exp filename following the scheme: + * [-].exp + */ +static char *expfilename(struct parser *p, const char *extra) +{ + char *filename; + /* reserve enough space to hold the string + * "-cpu_fffff_mmm_sss" + 1 for the trailing null character. + */ + char cpu_suffix[19]; + size_t n; + + if (!extra) + extra = ""; + *cpu_suffix = '\0'; + + /* determine length of resulting filename, which looks like: + * [-][-cpu___].exp + */ + n = strlen(p->y->fileroot); + + if (*extra != '\0') + /* the extra string is prepended with a -. */ + n += 1 + strlen(extra); + + if (p->conf->cpu.vendor != pcv_unknown) { + struct pt_cpu cpu; + + cpu = p->conf->cpu; + if (cpu.stepping) + n += sprintf(cpu_suffix, + "-cpu_%" PRIu16 "_%" PRIu8 "_%" PRIu8 "", + cpu.family, cpu.model, cpu.stepping); + else + n += sprintf(cpu_suffix, + "-cpu_%" PRIu16 "_%" PRIu8 "", cpu.family, + cpu.model); + } + + n += strlen(exp_suffix); + + /* trailing null character. */ + n += 1; + + filename = malloc(n); + if (!filename) + return NULL; + + strcpy(filename, p->y->fileroot); + if (*extra != '\0') { + strcat(filename, "-"); + strcat(filename, extra); + } + strcat(filename, cpu_suffix); + strcat(filename, exp_suffix); + + return filename; +} + +/* Returns true if @c is part of a label; false otherwise. */ +static int islabelchar(int c) +{ + if (isalnum(c)) + return 1; + + switch (c) { + case '_': + return 1; + } + + return 0; +} + +/* Generates the content of the .exp file by printing all lines with + * everything up to and including the first comment semicolon removed. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @p is the NULL pointer. + * Returns -err_file_write if the .exp file could not be fully written. + */ +static int p_gen_expfile(struct parser *p) +{ + int errcode; + enum { slen = 1024 }; + char s[slen]; + struct pt_directive *pd; + char *filename; + FILE *f; + + if (bug_on(!p)) + return -err_internal; + + pd = p->pd; + + /* the directive in the current line must be the .exp directive. 
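+	 * p_process has just returned -stop_process for it; that is
+	 * what makes p_start call this function.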
*/ + errcode = yasm_pd_parse(p->y, pd); + if (bug_on(errcode < 0)) + return -err_internal; + + if (bug_on(strcmp(pd->name, ".exp") != 0)) + return -err_internal; + + filename = expfilename(p, pd->payload); + if (!filename) + return -err_no_mem; + f = fopen(filename, "w"); + if (!f) { + free(filename); + return -err_file_open; + } + + for (;;) { + int i; + char *line, *comment; + + errcode = yasm_next_line(p->y, s, slen); + if (errcode < 0) + break; + + errcode = yasm_pd_parse(p->y, pd); + if (errcode < 0 && errcode != -err_no_directive) + break; + + if (errcode == 0 && strcmp(pd->name, ".exp") == 0) { + fclose(f); + printf("%s\n", filename); + free(filename); + filename = expfilename(p, pd->payload); + if (!filename) + return -err_no_mem; + f = fopen(filename, "w"); + if (!f) { + free(filename); + return -err_file_open; + } + continue; + } + + line = strchr(s, ';'); + if (!line) + continue; + + line += 1; + + comment = strchr(line, '#'); + if (comment) + *comment = '\0'; + + /* remove trailing spaces. */ + for (i = (int) strlen(line)-1; i >= 0 && isspace(line[i]); i--) + line[i] = '\0'; + + for (;;) { + char *tmp, label[256]; + uint64_t addr; + int zero_padding, qmark_padding, qmark_size, status; + + zero_padding = 0; + qmark_padding = 0; + qmark_size = 0; + status = 0; + + /* find the label character in the string. + * if there is no label character, we just print + * the rest of the line and end. + */ + tmp = strchr(line, '%'); + if (!tmp) { + if (fprintf(f, "%s", line) < 0) { + errcode = -err_file_write; + goto error; + } + break; + } + + /* make the label character a null byte and + * print the first portion, which does not + * belong to the label into the file. + */ + *tmp = '\0'; + if (fprintf(f, "%s", line) < 0) { + errcode = -err_file_write; + goto error; + } + + /* test if there is a valid label name after the %. */ + line = tmp+1; + if (*line == '\0' || isspace(*line)) { + errcode = -err_no_label; + goto error; + } + + /* check if zero padding is requested. */ + if (*line == '0') { + zero_padding = 1; + line += 1; + } + /* chek if ? padding is requested. */ + else if (*line == '?') { + qmark_padding = 1; + zero_padding = 1; + qmark_size = 0; + line += 1; + } + + /* advance i to the first non alpha-numeric + * character. all characters everything from + * line[0] to line[i-1] belongs to the label + * name. + */ + for (i = 0; islabelchar(line[i]); i++) + ; + + if (i > 255) { + errcode = -err_label_name; + goto error; + } + strncpy(label, line, i); + label[i] = '\0'; + + /* advance to next character. */ + line = &line[i]; + + /* lookup the label name and print it to the + * output file. + */ + errcode = yasm_lookup_label(p->y, &addr, label); + if (errcode < 0) { + errcode = l_lookup(p->pt_labels, &addr, label); + if (errcode < 0) + goto error; + + if (zero_padding) + status = fprintf(f, "%016" PRIx64, addr); + else + status = fprintf(f, "%" PRIx64, addr); + + if (status < 0) { + errcode = -err_file_write; + goto error; + } + + continue; + } + + /* check if masking is requested. */ + if (*line == '.') { + char *endptr; + long int n; + + line += 1; + + n = strtol(line, &endptr, 0); + /* check if strtol made progress and + * stops on a space or null byte. + * otherwise the int could not be + * parsed. 
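+			 * For a template token like "%?label.2", n is 2:
+			 * the address is masked to its two lowest bytes
+			 * and, with '?' padding, the six high bytes are
+			 * printed as "??" pairs.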
+ */ + if (line == endptr || + (*endptr != '\0' && !isspace(*endptr) + && !ispunct(*endptr))) { + errcode = -err_parse_int; + goto error; + } + addr &= (1ull << (n << 3)) - 1ull; + line = endptr; + + qmark_size = 8 - n; + } + + if (qmark_padding) { + for (i = 0; i < qmark_size; ++i) { + status = fprintf(f, "??"); + if (status < 0) { + errcode = -err_file_write; + goto error; + } + } + + for (; i < 8; ++i) { + uint8_t byte; + + byte = (uint8_t)(addr >> ((7 - i) * 8)); + + status = fprintf(f, "%02" PRIx8, byte); + if (status < 0) { + errcode = -err_file_write; + goto error; + } + } + } else if (zero_padding) + status = fprintf(f, "%016" PRIx64, addr); + else + status = fprintf(f, "%" PRIx64, addr); + + if (status < 0) { + errcode = -err_file_write; + goto error; + } + + } + + if (fprintf(f, "\n") < 0) { + errcode = -err_file_write; + goto error; + } + } + +error: + + fclose(f); + if (errcode < 0 && errcode != -err_out_of_range) { + fprintf(stderr, "fatal: %s could not be created:\n", filename); + yasm_print_err(p->y, "", errcode); + remove(filename); + } else + printf("%s\n", filename); + free(filename); + + /* If there are no lines left, we are done. */ + if (errcode == -err_out_of_range) + return 0; + + return errcode; +} + +static void p_close_files(struct parser *p) +{ + if (p->ptfile) { + fclose(p->ptfile); + p->ptfile = NULL; + } +} + +static int p_open_files(struct parser *p) +{ + p->ptfile = fopen(p->ptfilename, "wb"); + if (!p->ptfile) { + fprintf(stderr, "open %s failed\n", p->ptfilename); + goto error; + } + return 0; + +error: + p_close_files(p); + return -err_file_open; +} + +/* Processes the current directive. + * If the encoder returns an error, a message including current file and + * line number together with the pt error string is printed on stderr. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_internal if @p or @e is the NULL pointer. + * Returns -err_parse_missing_directive if there was a pt directive marker, + * but no directive. + * Returns -stop_process if the .exp directive was encountered. + * Returns -err_pt_lib if the pt encoder returned an error. + * Returns -err_parse if a general parsing error was encountered. + * Returns -err_parse_unknown_directive if there was an unknown pt directive. + */ +static int p_process(struct parser *p, struct pt_encoder *e) +{ + int bytes_written; + int errcode; + char *directive, *payload, *pt_label_name, *tmp; + struct pt_directive *pd; + struct pt_packet packet; + + if (bug_on(!p)) + return -err_internal; + + if (bug_on(!e)) + return -err_internal; + + pd = p->pd; + if (!pd) + return -err_internal; + + directive = pd->name; + payload = pd->payload; + + pt_label_name = NULL; + bytes_written = 0; + errcode = 0; + + /* find a label name. */ + tmp = strchr(directive, ':'); + if (tmp) { + uint64_t x; + + pt_label_name = directive; + directive = tmp+1; + *tmp = '\0'; + + /* ignore whitespace between label and directive. */ + while (isspace(*directive)) + directive += 1; + + /* if we can lookup a yasm label with the same name, the + * current pt directive label is invalid. */ + errcode = yasm_lookup_label(p->y, &x, pt_label_name); + if (errcode == 0) + errcode = -err_label_not_unique; + + if (errcode != -err_no_label) + return yasm_print_err(p->y, "label lookup", + errcode); + + /* if we can lookup a pt directive label with the same + * name, the current pt directive label is invalid. 
*/ + errcode = l_lookup(p->pt_labels, &x, pt_label_name); + if (errcode == 0) + errcode = -err_label_not_unique; + + if (errcode != -err_no_label) + return yasm_print_err(p->y, "label lookup", + -err_label_not_unique); + } + + /* now try to match the directive string and call the + * corresponding function that parses the payload and emits an + * according packet. + */ + if (strcmp(directive, "") == 0) + return yasm_print_err(p->y, "invalid syntax", + -err_parse_missing_directive); + else if (strcmp(directive, ".exp") == 0) { + /* this is the end of processing pt directives, so we + * add a p_last label to the pt directive labels. + */ + errcode = l_append(p->pt_labels, "eos", p->pt_bytes_written); + if (errcode < 0) + return yasm_print_err(p->y, "append label", errcode); + + return -stop_process; + } + + if (strcmp(directive, "psb") == 0) { + errcode = parse_empty(payload); + if (errcode < 0) { + yasm_print_err(p->y, "psb: parsing failed", errcode); + goto error; + } + packet.type = ppt_psb; + } else if (strcmp(directive, "psbend") == 0) { + errcode = parse_empty(payload); + if (errcode < 0) { + yasm_print_err(p->y, "psbend: parsing failed", errcode); + goto error; + } + packet.type = ppt_psbend; + } else if (strcmp(directive, "pad") == 0) { + errcode = parse_empty(payload); + if (errcode < 0) { + yasm_print_err(p->y, "pad: parsing failed", errcode); + goto error; + } + packet.type = ppt_pad; + } else if (strcmp(directive, "ovf") == 0) { + errcode = parse_empty(payload); + if (errcode < 0) { + yasm_print_err(p->y, "ovf: parsing failed", errcode); + goto error; + } + packet.type = ppt_ovf; + } else if (strcmp(directive, "stop") == 0) { + errcode = parse_empty(payload); + if (errcode < 0) { + yasm_print_err(p->y, "stop: parsing failed", errcode); + goto error; + } + packet.type = ppt_stop; + } else if (strcmp(directive, "tnt") == 0) { + errcode = parse_tnt(&packet.payload.tnt.payload, + &packet.payload.tnt.bit_size, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tnt: parsing failed", errcode); + goto error; + } + packet.type = ppt_tnt_8; + } else if (strcmp(directive, "tnt64") == 0) { + errcode = parse_tnt(&packet.payload.tnt.payload, + &packet.payload.tnt.bit_size, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tnt64: parsing failed", errcode); + goto error; + } + packet.type = ppt_tnt_64; + } else if (strcmp(directive, "tip") == 0) { + errcode = parse_ip(p, &packet.payload.ip.ip, + &packet.payload.ip.ipc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tip: parsing failed", errcode); + goto error; + } + packet.type = ppt_tip; + } else if (strcmp(directive, "tip.pge") == 0) { + errcode = parse_ip(p, &packet.payload.ip.ip, + &packet.payload.ip.ipc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tip.pge: parsing failed", + errcode); + goto error; + } + packet.type = ppt_tip_pge; + } else if (strcmp(directive, "tip.pgd") == 0) { + errcode = parse_ip(p, &packet.payload.ip.ip, + &packet.payload.ip.ipc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tip.pgd: parsing failed", + errcode); + goto error; + } + packet.type = ppt_tip_pgd; + } else if (strcmp(directive, "fup") == 0) { + errcode = parse_ip(p, &packet.payload.ip.ip, + &packet.payload.ip.ipc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "fup: parsing failed", errcode); + goto error; + } + packet.type = ppt_fup; + } else if (strcmp(directive, "mode.exec") == 0) { + if (strcmp(payload, "16bit") == 0) { + packet.payload.mode.bits.exec.csl = 0; + packet.payload.mode.bits.exec.csd = 0; + } else 
if (strcmp(payload, "64bit") == 0) { + packet.payload.mode.bits.exec.csl = 1; + packet.payload.mode.bits.exec.csd = 0; + } else if (strcmp(payload, "32bit") == 0) { + packet.payload.mode.bits.exec.csl = 0; + packet.payload.mode.bits.exec.csd = 1; + } else { + errcode = yasm_print_err(p->y, + "mode.exec: argument must be one of \"16bit\", \"64bit\" or \"32bit\"", + -err_parse); + goto error; + } + packet.payload.mode.leaf = pt_mol_exec; + packet.type = ppt_mode; + } else if (strcmp(directive, "mode.tsx") == 0) { + if (strcmp(payload, "begin") == 0) { + packet.payload.mode.bits.tsx.intx = 1; + packet.payload.mode.bits.tsx.abrt = 0; + } else if (strcmp(payload, "abort") == 0) { + packet.payload.mode.bits.tsx.intx = 0; + packet.payload.mode.bits.tsx.abrt = 1; + } else if (strcmp(payload, "commit") == 0) { + packet.payload.mode.bits.tsx.intx = 0; + packet.payload.mode.bits.tsx.abrt = 0; + } else { + errcode = yasm_print_err(p->y, + "mode.tsx: argument must be one of \"begin\", \"abort\" or \"commit\"", + -err_parse); + goto error; + } + packet.payload.mode.leaf = pt_mol_tsx; + packet.type = ppt_mode; + } else if (strcmp(directive, "pip") == 0) { + const char *modifier; + + errcode = parse_uint64(&packet.payload.pip.cr3, payload); + if (errcode < 0) { + yasm_print_err(p->y, "pip: parsing failed", errcode); + goto error; + } + packet.type = ppt_pip; + packet.payload.pip.nr = 0; + + modifier = strtok(NULL, " ,"); + if (modifier) { + if (strcmp(modifier, "nr") == 0) + packet.payload.pip.nr = 1; + else { + yasm_print_err(p->y, "pip: parsing failed", + -err_parse_trailing_tokens); + goto error; + } + } + } else if (strcmp(directive, "tsc") == 0) { + errcode = parse_uint64(&packet.payload.tsc.tsc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tsc: parsing failed", errcode); + goto error; + } + packet.type = ppt_tsc; + } else if (strcmp(directive, "cbr") == 0) { + errcode = parse_uint8(&packet.payload.cbr.ratio, payload); + if (errcode < 0) { + yasm_print_err(p->y, "cbr: parsing cbr failed", + errcode); + goto error; + } + packet.type = ppt_cbr; + } else if (strcmp(directive, "tma") == 0) { + errcode = parse_tma(&packet.payload.tma.ctc, + &packet.payload.tma.fc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "tma: parsing tma failed", + errcode); + goto error; + } + packet.type = ppt_tma; + } else if (strcmp(directive, "mtc") == 0) { + errcode = parse_uint8(&packet.payload.mtc.ctc, payload); + if (errcode < 0) { + yasm_print_err(p->y, "mtc: parsing mtc failed", + errcode); + goto error; + } + packet.type = ppt_mtc; + } else if (strcmp(directive, "cyc") == 0) { + errcode = parse_uint64(&packet.payload.cyc.value, payload); + if (errcode < 0) { + yasm_print_err(p->y, "cyc: parsing cyc failed", + errcode); + goto error; + } + packet.type = ppt_cyc; + } else if (strcmp(directive, "vmcs") == 0) { + errcode = parse_uint64(&packet.payload.vmcs.base, payload); + if (errcode < 0) { + yasm_print_err(p->y, "vmcs: parsing failed", errcode); + goto error; + } + packet.type = ppt_vmcs; + } else if (strcmp(directive, "mnt") == 0) { + errcode = parse_uint64(&packet.payload.mnt.payload, payload); + if (errcode < 0) { + yasm_print_err(p->y, "mnt: parsing failed", errcode); + goto error; + } + packet.type = ppt_mnt; + } else { + errcode = yasm_print_err(p->y, "invalid syntax", + -err_parse_unknown_directive); + goto error; + } + + bytes_written = pt_enc_next(e, &packet); + if (bytes_written < 0) { + const char *errtext, *format; + char *msg; + size_t n; + + errtext = pt_errstr(pt_errcode(bytes_written)); + 
format = "encoder error in directive %s (status %s)"; + /* the length of format includes the "%s" (-2) + * characters, we add errtext (+-0) and then we need + * space for a terminating null-byte (+1). + */ + n = strlen(format)-4 + strlen(directive) + strlen(errtext) + 1; + + msg = malloc(n); + if (!msg) + errcode = yasm_print_err(p->y, + "encoder error not enough memory to show error code", + -err_pt_lib); + else { + sprintf(msg, format, directive, errtext); + errcode = yasm_print_err(p->y, msg, -err_pt_lib); + free(msg); + } + } else { + if (pt_label_name) { + errcode = l_append(p->pt_labels, pt_label_name, + p->pt_bytes_written); + if (errcode < 0) + goto error; + } + p->pt_bytes_written += bytes_written; + } + +error: + if (errcode < 0) + bytes_written = errcode; + return bytes_written; +} + +/* Starts the parsing process. + * + * Returns 0 on success; a negative enum errcode otherwise. + * Returns -err_pt_lib if the pt encoder could not be initialized. + * Returns -err_file_write if the .pt or .exp file could not be fully + * written. + */ +int p_start(struct parser *p) +{ + int errcode; + + if (bug_on(!p)) + return -err_internal; + + errcode = yasm_parse(p->y); + if (errcode < 0) + return errcode; + + for (;;) { + int bytes_written; + struct pt_encoder *e; + + errcode = yasm_next_pt_directive(p->y, p->pd); + if (errcode < 0) + break; + + e = pt_alloc_encoder(p->conf); + if (!e) { + fprintf(stderr, "pt_alloc_encoder failed\n"); + errcode = -err_pt_lib; + break; + } + + bytes_written = p_process(p, e); + + pt_free_encoder(e); + + if (bytes_written == -stop_process) { + errcode = p_gen_expfile(p); + break; + } + if (bytes_written < 0) { + errcode = bytes_written; + break; + } + if (fwrite(p->conf->begin, 1, bytes_written, p->ptfile) + != (size_t)bytes_written) { + fprintf(stderr, "write %s failed", p->ptfilename); + errcode = -err_file_write; + break; + } + } + + /* If there is no directive left, there's nothing more to do. 
*/ + if (errcode == -err_no_directive) + return 0; + + return errcode; +} + +int parse(const char *pttfile, const struct pt_config *conf) +{ + int errcode; + struct parser *p; + + p = p_alloc(pttfile, conf); + if (!p) + return -err_no_mem; + + errcode = p_open_files(p); + if (errcode < 0) + goto error; + + errcode = p_start(p); + p_close_files(p); + +error: + p_free(p); + return errcode; +} + +int parse_empty(char *payload) +{ + if (!payload) + return 0; + + strtok(payload, " "); + if (!payload || *payload == '\0') + return 0; + + return -err_parse_trailing_tokens; +} + +int parse_tnt(uint64_t *tnt, uint8_t *size, char *payload) +{ + char c; + + if (bug_on(!size)) + return -err_internal; + + if (bug_on(!tnt)) + return -err_internal; + + *size = 0; + *tnt = 0ull; + + if (!payload) + return 0; + + while (*payload != '\0') { + c = *payload; + payload++; + if (isspace(c) || c == '.') + continue; + *size += 1; + *tnt <<= 1; + switch (c) { + case 'n': + break; + case 't': + *tnt |= 1; + break; + default: + return -err_parse_unknown_char; + } + } + + return 0; +} + +static int check_ipc(enum pt_ip_compression ipc) +{ + switch (ipc) { + case pt_ipc_suppressed: + case pt_ipc_update_16: + case pt_ipc_update_32: + case pt_ipc_update_48: + case pt_ipc_sext_48: + case pt_ipc_full: + return 0; + } + return -err_parse_ipc; +} + +int parse_ip(struct parser *p, uint64_t *ip, enum pt_ip_compression *ipc, + char *payload) +{ + int errcode; + char *endptr; + + if (bug_on(!ip)) + return -err_internal; + + if (bug_on(!ipc)) + return -err_internal; + + *ipc = pt_ipc_suppressed; + *ip = 0; + + payload = strtok(payload, " :"); + if (!payload || *payload == '\0') + return -err_parse_no_args; + + *ipc = (enum pt_ip_compression) strtol(payload, &endptr, 0); + if (payload == endptr || *endptr != '\0') + return -err_parse_ipc; + + /* is ipc valid? */ + errcode = check_ipc(*ipc); + if (errcode < 0) + return errcode; + + payload = strtok(NULL, " :"); + if (!payload) + return -err_parse_ip_missing; + + /* can be resolved to a label? */ + if (*payload == '%') { + if (!p) + return -err_internal; + + errcode = yasm_lookup_label(p->y, ip, payload + 1); + if (errcode < 0) + return errcode; + } else { + /* can be parsed as address? */ + errcode = str_to_uint64(payload, ip, 0); + if (errcode < 0) + return errcode; + } + + /* no more tokens left. 
*/ + payload = strtok(NULL, " "); + if (payload) + return -err_parse_trailing_tokens; + + return 0; +} + +int parse_uint64(uint64_t *x, char *payload) +{ + int errcode; + + if (bug_on(!x)) + return -err_internal; + + payload = strtok(payload, " ,"); + if (!payload) + return -err_parse_no_args; + + errcode = str_to_uint64(payload, x, 0); + if (errcode < 0) + return errcode; + + return 0; +} + +int parse_uint32(uint32_t *x, char *payload) +{ + int errcode; + + if (bug_on(!x)) + return -err_internal; + + payload = strtok(payload, " ,"); + if (!payload) + return -err_parse_no_args; + + errcode = str_to_uint32(payload, x, 0); + if (errcode < 0) + return errcode; + + return 0; +} + +int parse_uint16(uint16_t *x, char *payload) +{ + int errcode; + + if (bug_on(!x)) + return -err_internal; + + payload = strtok(payload, " ,"); + if (!payload) + return -err_parse_no_args; + + errcode = str_to_uint16(payload, x, 0); + if (errcode < 0) + return errcode; + + return 0; +} + +int parse_uint8(uint8_t *x, char *payload) +{ + int errcode; + + if (bug_on(!x)) + return -err_internal; + + payload = strtok(payload, " ,"); + if (!payload) + return -err_parse_no_args; + + errcode = str_to_uint8(payload, x, 0); + if (errcode < 0) + return errcode; + + return 0; +} + +int parse_tma(uint16_t *ctc, uint16_t *fc, char *payload) +{ + char *endptr; + long int i; + + if (bug_on(!ctc || !fc)) + return -err_internal; + + payload = strtok(payload, ","); + if (!payload || *payload == '\0') + return -err_parse_no_args; + + i = strtol(payload, &endptr, 0); + if (payload == endptr || *endptr != '\0') + return -err_parse_int; + + if (i > 0xffffl) + return -err_parse_int_too_big; + + *ctc = (uint16_t)i; + + payload = strtok(NULL, " ,"); + if (!payload) + return -err_parse_no_args; + + i = strtol(payload, &endptr, 0); + if (payload == endptr || *endptr != '\0') + return -err_parse_int; + + if (i > 0xffffl) + return -err_parse_int_too_big; + + *fc = (uint16_t)i; + + /* no more tokens left. */ + payload = strtok(NULL, " "); + if (payload) + return -err_parse_trailing_tokens; + + return 0; +} diff --git a/pttc/src/posix/util.c b/pttc/src/posix/util.c new file mode 100644 index 0000000..17c9479 --- /dev/null +++ b/pttc/src/posix/util.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "errcode.h" +#include "util.h" + +#include +#include +#include +#include +#include +#include + +int run(const char *file, char *const argv[]) +{ + pid_t pid; + int status; + + if (bug_on(!file)) + return -err_internal; + + if (bug_on(!argv)) + return -err_internal; + + pid = fork(); + + if (!pid) { + execvp(file, argv); + perror(argv[0]); + exit(1); + } + if (waitpid(pid, &status, 0) < 0) + return -err_other; + + if (!WIFEXITED(status)) + return -err_other; + + if (WEXITSTATUS(status)) + return -err_run; + + return 0; +} diff --git a/pttc/src/pttc.c b/pttc/src/pttc.c new file mode 100644 index 0000000..494eb92 --- /dev/null +++ b/pttc/src/pttc.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "errcode.h" +#include "pttc.h" +#include "parse.h" + +int pttc_main(const struct pttc_options *options) +{ + int errcode; + enum { buflen = 1024 }; + uint8_t buf[buflen]; + struct pt_config conf; + + pt_config_init(&conf); + conf.cpu = options->cpu; + conf.begin = buf; + conf.end = buf+buflen; + + /* apply errata for the chosen cpu. 
*/ + errcode = pt_cpu_errata(&conf.errata, &conf.cpu); + if (errcode < 0) { + fprintf(stderr, "fatal: errata configuration failed %d: %s\n", + errcode, pt_errstr(pt_errcode(errcode))); + return errcode; + } + + errcode = parse(options->pttfile, &conf); + if (errcode < 0 && errcode != -err_run) + fprintf(stderr, "fatal: %s\n", errstr[-errcode]); + + return -errcode; + +} diff --git a/pttc/src/util.c b/pttc/src/util.c new file mode 100644 index 0000000..e27a5a0 --- /dev/null +++ b/pttc/src/util.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "errcode.h" +#include "util.h" + +#include +#include +#include +#include +#include + +char *duplicate_str(const char *s) +{ + char *dup; + + if (!s) + return NULL; + + dup = malloc(strlen(s)+1); + if (!dup) + return NULL; + return strcpy(dup, s); +} + +int str_to_uint64(const char *str, uint64_t *val, int base) +{ + char *endptr; + uint64_t x; + + if (!str || !val) + return -err_internal; + + errno = 0; + x = strtoull(str, &endptr, base); + + if (errno == EINVAL) + return -err_parse_int; + + if (errno == ERANGE) + return -err_parse_int_too_big; + + if (str == endptr || *endptr != '\0') + return -err_parse_int; + + *val = x; + + return 0; +} + +int str_to_uint32(const char *str, uint32_t *val, int base) +{ + uint64_t x; + int errcode; + + if (!str || !val) + return -err_internal; + + errcode = str_to_uint64(str, &x, base); + if (errcode < 0) + return errcode; + + if (UINT32_MAX < x) + return -err_parse_int_too_big; + + *val = (uint32_t) x; + return 0; +} + +int str_to_uint16(const char *str, uint16_t *val, int base) +{ + uint64_t x; + int errcode; + + if (!str || !val) + return -err_internal; + + errcode = str_to_uint64(str, &x, base); + if (errcode < 0) + return errcode; + + if (UINT16_MAX < x) + return -err_parse_int_too_big; + + *val = (uint16_t) x; + return 0; +} + +int str_to_uint8(const char *str, uint8_t *val, int base) +{ + uint64_t x; + int errcode; + + if (!str || !val) + return -err_internal; + + errcode = str_to_uint64(str, &x, base); + if (errcode < 0) + return errcode; + + if (UINT8_MAX < x) + return -err_parse_int_too_big; + + *val = (uint8_t) x; + return 0; +} + +int do_bug_on(int cond, const char *condstr, const char *file, int line) +{ + if (cond) + fprintf(stderr, "%s:%d: internal error: %s\n", file, line, + condstr); + return cond; +} +struct label *l_alloc(void) +{ + return calloc(1, sizeof(struct label)); +} + +void l_free(struct label *l) +{ + if (!l) + return; + + l_free(l->next); + free(l->name); + free(l); +} + +int l_append(struct label *l, const char *name, uint64_t addr) +{ + int errcode; + + if (bug_on(!l)) + return -err_internal; + + if (bug_on(!name)) + return -err_internal; + + /* skip to the last label. */ + while (l->next) { + l = l->next; + + /* ignore the first label, which has no name. */ + if (strcmp(l->name, name) == 0) + return -err_label_not_unique; + } + + /* append a new label. */ + l->next = l_alloc(); + if (!l->next) + return -err_no_mem; + + /* save the name. */ + l->next->name = duplicate_str(name); + if (!l->next->name) { + errcode = -err_no_mem; + goto error; + } + + /* save the address. 
*/ + l->next->addr = addr; + + return 0; +error: + free(l->next->name); + free(l->next); + l->next = NULL; + return errcode; +} + +int l_lookup(const struct label *l, uint64_t *addr, + const char *name) +{ + if (bug_on(!l)) + return -err_internal; + + if (bug_on(!addr)) + return -err_internal; + + if (bug_on(!name)) + return -err_internal; + + + *addr = 0; + while (l->next) { + l = l->next; + if (strcmp(l->name, name) == 0) { + *addr = l->addr; + return 0; + } + } + return -err_no_label; +} + +struct label *l_find(struct label *l, const char *name) +{ + if (bug_on(!l)) + return NULL; + + if (bug_on(!name)) + return NULL; + + + while (l->next) { + l = l->next; + + if (bug_on(!l->name)) + continue; + + if (strcmp(l->name, name) == 0) + return l; + } + return NULL; +} diff --git a/pttc/src/windows/util.c b/pttc/src/windows/util.c new file mode 100644 index 0000000..7ac4327 --- /dev/null +++ b/pttc/src/windows/util.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "errcode.h" +#include "util.h" + +#include +#include +#include +#include +#include + + +int run(const char *file, char *const argv[]) +{ + int errcode; + + int i; + size_t size; + char *args; + + STARTUPINFO si; + PROCESS_INFORMATION pi; + DWORD exit_code; + + DWORD dwret; + BOOL bret; + + + errcode = 0; + + if (bug_on(!file)) { + errcode = -err_internal; + goto out; + } + + if (bug_on(!argv)) { + errcode = -err_internal; + goto out; + } + + + /* calculate length of command line - this is the cumulative length of + * all arguments, plus two quotation marks (to make it quoted strings + * and allow for spaces in file/path names), plus a space after each + * arguments as delimiter (after the last arguments it's a terminating + * zero-byte instead of the space). 
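+	 * For example, the two arguments "yasm" and "-f" are emitted as
+	 *   "yasm" "-f"
+	 * and account for 4+3 plus 2+3 bytes, each +3 covering the two
+	 * quotation marks and the delimiter (which, for the last
+	 * argument, becomes the terminating zero-byte).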
* + */ + size = 0; + for (i = 0; argv[i]; ++i) + size += strlen(argv[i]) + 3; + + /* allocate command line string */ + args = calloc(size, 1); + if (!args) + return -err_no_mem; + + /* construct command line string, putting quotation marks + * around every argument of the vector and a space after it + */ + size = 0; + for (i = 0; argv[i]; ++i) { + args[size++] = '"'; + strcpy(args + size, argv[i]); + size += strlen(argv[i]); + args[size++] = '"'; + args[size++] = ' '; + } + /* transform last space into a terminating zero-byte and fix up size */ + args[--size] = '\0'; + + + /* initialize process/startup info */ + memset(&pi, 0, sizeof(pi)); + memset(&si, 0, sizeof(si)); + si.cb = sizeof(si); + + /* create process - since the first parameter is NULL, the + * second parameter represents a command as it would behave + * on a command shell + */ + bret = CreateProcess(NULL, args, + NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi); + if (!bret) { + errcode = -err_other; + goto out_args; + } + + dwret = WaitForSingleObject(pi.hProcess, INFINITE); + if (dwret == WAIT_FAILED) { + errcode = -err_other; + goto out_handles; + } + + bret = GetExitCodeProcess(pi.hProcess, &exit_code); + if (!bret) { + errcode = -err_other; + goto out_handles; + } + + if (exit_code != 0) + errcode = -err_run; + + +out_handles: + CloseHandle(pi.hProcess); + CloseHandle(pi.hThread); +out_args: + free(args); +out: + return errcode; +} diff --git a/pttc/src/yasm.c b/pttc/src/yasm.c new file mode 100644 index 0000000..bf06f49 --- /dev/null +++ b/pttc/src/yasm.c @@ -0,0 +1,835 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "errcode.h" +#include "file.h" +#include "util.h" +#include "yasm.h" + +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +# define snprintf _snprintf_c +#endif + + +static int create_section_label_name(char *label, int size, const char *name, + const char *attribute) +{ + int written; + + written = snprintf(label, size, "section_%s_%s", name, attribute); + if (size <= written) + return -err_no_mem; + + return 0; +} + +static int add_section_label(struct label *l, const char *name, + const char *attribute, uint64_t value, + struct label **length) +{ + char label[255]; + int errcode; + + errcode = create_section_label_name(label, sizeof(label), name, + attribute); + if (errcode < 0) + return errcode; + + errcode = l_append(l, label, value); + if (errcode < 0) + return errcode; + + if (length) + *length = l_find(l, label); + + return 0; +} + +static int parse_section_label(struct label *l, const char *name, + const char *attribute) +{ + uint64_t addr; + char *value; + + value = strtok(NULL, " ]"); + if (!value) + return -err_section_attribute_no_value; + + if (sscanf(value, "%" PRIx64, &addr) != 1) + return -err_parse_int; + + return add_section_label(l, name, attribute, addr, NULL); +} + +static int parse_section(char *line, struct label *l, struct label **length) +{ + char *name, *attribute; + int errcode; + + name = strtok(line, " "); + if (!name) + return -err_section_no_name; + + /* we initialize the section's length to zero - it will be updated + * when we process the section's content. + */ + errcode = add_section_label(l, name, "length", 0ull, length); + if (errcode < 0) + return errcode; + + for (;;) { + attribute = strtok(NULL, " =]"); + if (!attribute) + return 0; + + if (strcmp(attribute, "start") == 0) { + errcode = parse_section_label(l, name, "start"); + if (errcode < 0) + return errcode; + } else if (strcmp(attribute, "vstart") == 0) { + errcode = parse_section_label(l, name, "vstart"); + if (errcode < 0) + return errcode; + } else + return -err_section_unknown_attribute; + } +} + +static int lookup_section_label(struct label *l, const char *name, + const char *attribute, uint64_t *value) +{ + char label[255]; + int errcode; + + errcode = create_section_label_name(label, sizeof(label), name, + attribute); + if (errcode < 0) + return errcode; + + return l_lookup(l, value, label); +} + +static int lookup_section_vstart(struct label *l, char *line, + uint64_t *vstart) +{ + char *name; + + name = strtok(line, " "); + if (!name) + return -err_section_no_name; + + return lookup_section_label(l, name, "vstart", vstart); +} + +int parse_yasm_labels(struct label *l, const struct text *t) +{ + int errcode, no_org_directive; + size_t i; + uint64_t base_addr; + enum { linelen = 1024 }; + char line[linelen]; + struct label *length; + + if (bug_on(!t)) + return -err_internal; + + base_addr = 0; + no_org_directive = 1; + length = NULL; + + /* determine base address from org directive and insert special + * section labels. 
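/* Illustration, not part of the diff: a section directive such as the
 * hypothetical
 *     [section .text start=0x1000 vstart=0x2000]
 * makes parse_section() above record the labels
 *     section_.text_length  (0, updated while parsing)
 *     section_.text_start   = 0x1000
 *     section_.text_vstart  = 0x2000
 * which can be queried later, for example:
 */
static int example_query_vstart(struct label *l, uint64_t *vstart)
{
        /* returns -err_no_label if the attribute was never seen. */
        return lookup_section_label(l, ".text", "vstart", vstart);
}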
+ */ + for (i = 0; i < t->n; i++) { + char *tmp; + + errcode = text_line(t, line, linelen, i); + if (errcode < 0) + return errcode; + + tmp = strstr(line, "[section"); + if (tmp) { + tmp += strlen("[section"); + errcode = parse_section(tmp, l, &length); + if (errcode < 0) + return errcode; + continue; + } + + tmp = strstr(line, "[org"); + if (tmp) { + base_addr = strtol(tmp+strlen("[org"), NULL, 0); + + errcode = l_append(l, "org", base_addr); + if (errcode < 0) + return errcode; + + no_org_directive = 0; + continue; + } + + /* update the section__length label, if we have one. + * + * this must be last; it destroys @line. + */ + if (length) { + uint64_t value, size; + + tmp = strtok(line, " "); + if (!tmp) + continue; + + /* we expect a line number. */ + errcode = str_to_uint64(tmp, &value, 10); + if (errcode < 0) + continue; + + tmp = strtok(NULL, " "); + if (!tmp) + continue; + + /* we expect an address. */ + errcode = str_to_uint64(tmp, &value, 16); + if (errcode < 0) + continue; + + tmp = strtok(NULL, " "); + if (!tmp) + continue; + + /* we expect an opcode. */ + errcode = str_to_uint64(tmp, &value, 16); + if (errcode < 0) + continue; + + /* we got an opcode - let's compute it's size. */ + for (size = 0; value != 0; value >>= 8) + size += 1; + + /* update the section__length label. */ + length->addr += size; + } + } + + if (no_org_directive) + return -err_no_org_directive; + + for (i = 0; i < t->n; i++) { + char *tmp, *name; + uint64_t addr; + + errcode = text_line(t, line, linelen, i); + if (errcode < 0) + goto error; + + /* Change the base on section switches. */ + tmp = strstr(line, "[section"); + if (tmp) { + tmp += strlen("[section"); + errcode = lookup_section_vstart(l, tmp, &base_addr); + if (errcode < 0) + return errcode; + continue; + } + + /* skip line number count. */ + tmp = strtok(line, " "); + if (!tmp) + continue; + + /* the label can now be on the same line as the memory + * address or on a line by its own. + * we look at the next token and (1) if it looks like a + * label, we search in the following lines for the + * corresponding address; or (2) if it looks like an + * address, we store it and see if the token after the + * opcode looks like a token; or (3) none of the above, + * we continue with the next line. + */ + + /* second token after the line number count. it's + * either an address; or a label. + */ + tmp = strtok(NULL, " "); + if (!tmp) + continue; + + if (!make_label(tmp)) { + /* get address in case we find a label later. */ + if (sscanf(tmp, "%" PRIx64, &addr) != 1) + continue; + + /* skip the opcode token. */ + tmp = strtok(NULL, " "); + if (!tmp) + continue; + + /* this might be a label now. */ + tmp = strtok(NULL, " "); + if (!make_label(tmp)) + continue; + + errcode = l_append(l, tmp, addr + base_addr); + if (errcode < 0) + goto error; + continue; + } + name = duplicate_str(tmp); + if (!name) { + errcode = -err_no_mem; + goto error; + } + + /* there was a label so now an address needs to + * be found. 
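/* Illustration, not part of the diff: the passes above expect listing
 * lines of the general shape
 *     <line no> <hex address> <hex opcode> [label:] <source ...>
 * (inferred from the strtok()/sscanf() calls, not from yasm docs).
 * A label on a line of its own is resolved by scanning forward for the
 * next line that carries an address, as sketched here:
 */
static int example_scan_addr(const char *lstline, uint64_t *addr)
{
        /* matches e.g. "     5 00001000 90   nop" and stores 0x1000. */
        if (sscanf(lstline, "%*d %" PRIx64 " %*x %*s", addr) == 1)
                return 0;

        return -err_label_addr;
}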
+ */ + errcode = -err_label_addr; + for (i += 1; i < t->n; i++) { + int errcode_text; + + errcode_text = text_line(t, line, linelen, i); + if (errcode_text < 0) { + errcode = errcode_text; + break; + } + if (sscanf(line, "%*d %" PRIx64 " %*x %*s", &addr) + == 1) { + errcode = l_append(l, name, addr + base_addr); + break; + } + } + if (errcode == -err_label_addr) + fprintf(stderr, "label '%s' has no address\n", name); + free(name); + if (errcode < 0) + goto error; + } + + return 0; + +error: + l_free(l->next); + free(l->name); + l->next = NULL; + l->name = NULL; + return errcode; +} + +int make_label(char *s) +{ + size_t n; + + if (bug_on(!s)) + return -err_internal; + + n = strlen(s); + if (n == 0 || s[n-1] != ':') + return 0; + + s[n-1] = '\0'; + return 1; +} + +struct state *st_alloc(void) +{ + return calloc(1, sizeof(struct state)); +} + +void st_free(struct state *st) +{ + if (!st) + return; + + free(st->filename); + free(st->line); + free(st); +} + +int st_print_err(const struct state *st, const char *s, int errcode) +{ + if (bug_on(!st)) + return -err_internal; + + if (bug_on(!(-err_max < errcode && errcode < 0))) + return -err_internal; + + if (!s) + s = ""; + + fprintf(stderr, "%s:%d: error: %s (%s)\n", st->filename, st->n-1, s, + errstr[-errcode]); + + return errcode; +} + +/* Sets current @filename, increment (@inc) and line number (@n) in @st. + * + * Note that @filename, @inc and @n correspond to the yasm .lst file + * source file information. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +static int st_set_file(struct state *st, const char *filename, int inc, int n) +{ + if (bug_on(!st)) + return -err_internal; + + if (bug_on(!filename)) + return -err_internal; + + free(st->filename); + st->filename = duplicate_str(filename); + if (!st->filename) + return -err_no_mem; + st->inc = inc; + st->n = n; + return 0; +} + +/* Sets current line in @st to @s and increases the line number. + * + * Returns 0 on success; a negative enum errcode otherwise. + */ +static int st_update(struct state *st, const char *s) +{ + free(st->line); + st->line = duplicate_str(s); + if (!st->line) + return -err_no_mem; + + st->n += st->inc; + return 0; +} + +struct pt_directive *pd_alloc(size_t n) +{ + struct pt_directive *pd; + + pd = calloc(1, sizeof(*pd)); + if (!pd) + return NULL; + + pd->name = malloc(n); + if (!pd->name) + goto error; + + pd->payload = malloc(n); + if (!pd->payload) + goto error; + + pd->nlen = n; + pd->plen = n; + + return pd; + +error: + pd_free(pd); + return NULL; +} + +void pd_free(struct pt_directive *pd) +{ + if (!pd) + return; + + free(pd->name); + free(pd->payload); + free(pd); +} + +int pd_set(struct pt_directive *pd, const char *name, const char *payload) +{ + if (bug_on(!pd)) + return -err_internal; + + if (bug_on(!name)) + return -err_internal; + + if (bug_on(!payload)) + return -err_internal; + + strncpy(pd->name, name, pd->nlen); + if (pd->nlen > 0) + pd->name[pd->nlen - 1] = '\0'; + strncpy(pd->payload, payload, pd->plen); + if (pd->plen > 0) + pd->payload[pd->plen - 1] = '\0'; + + return 0; +} + +/* Magic annotation marker. */ +const char *marker = "@pt "; + +int pd_parse(struct pt_directive *pd, struct state *st) +{ + char *line, *comment, *ptdirective, *openpar, *closepar; + char *directive, *payload; + int errcode; + char *c; + + if (bug_on(!pd)) + return -err_internal; + + if (bug_on(!st)) + return -err_internal; + + + line = duplicate_str(st->line); + if (!line) + return -err_no_mem; + + /* make line lower case. 
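/* Illustration, not part of the diff: allocating and filling a
 * directive object with the helpers above; "psb" is a hypothetical
 * directive name.
 */
static int example_directive(void)
{
        struct pt_directive *pd;
        int errcode;

        pd = pd_alloc(32);      /* 32 bytes each for name and payload. */
        if (!pd)
                return -err_no_mem;

        /* pd_set() truncates over-long strings and always terminates. */
        errcode = pd_set(pd, "psb", "");

        pd_free(pd);
        return errcode;
}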
*/ + for (c = line; *c; ++c) + *c = (char) tolower(*c); + + /* if the current line is not a comment or contains no magic marker + * -err_no_directive is returned. + */ + errcode = -err_no_directive; + + /* search where the comment begins. */ + comment = strchr(line, ';'); + + /* if there is no comment in the line, we don't have anything to + * do. + */ + if (!comment) + goto cleanup; + + /* search for @pt marker. */ + ptdirective = strstr(comment+1, marker); + + /* if there is no such marker in the comment, we don't have + * anything to do. + */ + if (!ptdirective) + goto cleanup; + + /* directive found, now parse the payload. */ + errcode = 0; + + /* find position of next '(', separating the directive and the + * payload. + */ + openpar = strchr(ptdirective, '('); + if (!openpar) { + errcode = -err_missing_openpar; + st_print_err(st, "invalid syntax", errcode); + goto cleanup; + } + + /* find position of next ')', marking the end of the payload */ + closepar = strchr(openpar, ')'); + if (!closepar) { + errcode = -err_missing_closepar; + st_print_err(st, "invalid syntax", errcode); + goto cleanup; + } + + /* make "multiple" strings by artifically terminating them with + * '\0' then get directive and payload substrings, which will + * have leading and trailing whitespace "removed". + */ + *openpar = '\0'; + *closepar = '\0'; + + /* skip leading whitespace. */ + directive = ptdirective + strlen(marker); + while (isspace(*directive)) + directive += 1; + + payload = openpar+1; + + errcode = pd_set(pd, directive, payload); + +cleanup: + free(line); + return errcode; +} + +const char *bin_suffix = ".bin"; +const char *lst_suffix = ".lst"; +const char path_separator = '/'; +enum { + max_filename_len = 1024 +}; + +struct yasm *yasm_alloc(const char *pttfile) +{ + char *tmp; + size_t n; + struct yasm *y; + + if (bug_on(!pttfile)) + return NULL; + + y = calloc(1, sizeof(*y)); + if (!y) + return NULL; + + y->fl = fl_alloc(); + if (!y->fl) + goto error; + + y->st_asm = st_alloc(); + if (!y->st_asm) + goto error; + + y->fileroot = duplicate_str(pttfile); + if (!y->fileroot) + goto error; + + y->pttfile = duplicate_str(pttfile); + if (!y->pttfile) + goto error; + + tmp = strrchr(y->fileroot, '.'); + if (tmp) + *tmp = '\0'; + + tmp = strrchr(y->fileroot, path_separator); + if (tmp) { + tmp += 1; + memmove(y->fileroot, tmp, strlen(tmp)+1); + } + + y->binfile = malloc(strlen(y->fileroot)+strlen(bin_suffix)+1); + if (!y->binfile) + goto error; + + y->lstfile = malloc(strlen(y->fileroot)+strlen(lst_suffix)+1); + if (!y->lstfile) + goto error; + + n = strlen(y->fileroot); + + strcpy(y->binfile, y->fileroot); + strcpy(y->binfile+n, bin_suffix); + strcpy(y->lstfile, y->fileroot); + strcpy(y->lstfile+n, lst_suffix); + + y->l = l_alloc(); + if (!y->l) + goto error; + + return y; + +error: + yasm_free(y); + return 0; +} + +static int yasm_run(struct yasm *y) +{ + char *argv[] = { + "yasm", + "", + "-f", "bin", + "-o", "", + "-L", "nasm", + "-l", "", + NULL, + }; + + argv[1] = y->pttfile; + argv[5] = y->binfile; + argv[9] = y->lstfile; + + return run(argv[0], argv); +} + +int yasm_parse(struct yasm *y) +{ + int errcode; + const struct text *t; + + if (bug_on(!y)) + return -err_internal; + + errcode = yasm_run(y); + if (errcode < 0) + goto error; + + errcode = fl_gettext(y->fl, &t, y->lstfile); + if (errcode < 0) + goto error; + + errcode = parse_yasm_labels(y->l, t); + if (errcode < 0) + goto error; + +error: + return errcode; +} + +void yasm_free(struct yasm *y) +{ + if (!y) + return; + + free(y->fileroot); + 
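        /* Illustrative note, not part of the diff: for a hypothetical input
         * "test/foo.ptt", yasm_alloc() set fileroot to "foo", binfile to
         * "foo.bin" and lstfile to "foo.lst", while pttfile keeps the full
         * path; yasm_run() then executed the equivalent of
         *
         *     yasm test/foo.ptt -f bin -o foo.bin -L nasm -l foo.lst
         *
         * The buffers freed here are exactly those allocations.
         */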
free(y->pttfile); + free(y->lstfile); + free(y->binfile); + fl_free(y->fl); + st_free(y->st_asm); + l_free(y->l); + free(y); +} + +int yasm_lookup_label(const struct yasm *y, uint64_t *addr, + const char *labelname) +{ + if (bug_on(!y)) + return -err_internal; + + + return l_lookup(y->l, addr, labelname); +} + +static int yasm_advance_next_line(struct yasm *y) +{ + enum { slen = 1024 }; + char s[slen]; + char filename[max_filename_len]; + int errcode; + int asm_line, asm_inc; + + if (bug_on(!y)) + return -err_internal; + + + for (;;) { + errcode = fl_getline(y->fl, s, slen, y->lstfile, + y->lst_curr_line); + /* always advance in lst file. */ + y->lst_curr_line += 1; + + if (errcode < 0) + break; + + /* if the current lst file line is a line directive, set + * state information to this file, line and increment + * and continue. + */ + if (sscanf(s, "%*d %%line %d+%d %1023[^\r\n]", &asm_line, + &asm_inc, filename) == 3) { + st_set_file(y->st_asm, filename, asm_line, asm_inc); + continue; + } + + /* if line number or increment in the previous line + * directive is <= 0, the current lst line has no + * corresponding line in the source file. + */ + if (y->st_asm->n <= 0 || y->st_asm->inc <= 0) + continue; + + /* finally the current line in the lst file can be + * correlated to the source file, so we retrieve the + * line from it and update the state. + */ + errcode = fl_getline(y->fl, s, slen, y->st_asm->filename, + y->st_asm->n-1); + st_update(y->st_asm, s); + break; + } + + return errcode; +} + +int yasm_pd_parse(struct yasm *y, struct pt_directive *pd) +{ + return pd_parse(pd, y->st_asm); +} + +int yasm_next_pt_directive(struct yasm *y, struct pt_directive *pd) +{ + int errcode; + + for (;;) { + errcode = yasm_advance_next_line(y); + if (errcode < 0) + break; + + errcode = pd_parse(pd, y->st_asm); + if (errcode != -err_no_directive) + return errcode; + + } + if (errcode == -err_out_of_range) + errcode = -err_no_directive; + + return errcode; +} + +int yasm_next_line(struct yasm *y, char *dest, size_t destlen) +{ + int errcode; + + if (!destlen) + return 0; + + if (bug_on(!dest)) + return -err_internal; + + errcode = yasm_advance_next_line(y); + if (errcode < 0) + return errcode; + + strncpy(dest, y->st_asm->line, destlen); + dest[destlen-1] = '\0'; + + return 0; +} + +int yasm_print_err(const struct yasm *y, const char *s, int errcode) +{ + if (bug_on(!y)) + return -err_internal; + + + return st_print_err(y->st_asm, s, errcode); +} + +int yasm_lookup_section_label(const struct yasm *y, const char *name, + const char *attribute, uint64_t *value) +{ + if (bug_on(!y)) + return -err_internal; + + return lookup_section_label(y->l, name, attribute, value); +} diff --git a/pttc/test/src/test_all_directives.ptt b/pttc/test/src/test_all_directives.ptt new file mode 100644 index 0000000..4994308 --- /dev/null +++ b/pttc/test/src/test_all_directives.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
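/* Illustration, not part of the diff: a minimal consumer loop for the
 * yasm API defined above; a caller other than pttc's generator is
 * hypothetical.
 */
static int example_collect(struct yasm *y, struct pt_directive *pd)
{
        int errcode;

        for (;;) {
                errcode = yasm_next_pt_directive(y, pd);
                if (errcode == -err_no_directive)
                        return 0;       /* end of input, no more @pt lines. */

                if (errcode < 0)
                        return errcode;

                /* pd->name / pd->payload now hold e.g. "psb" and "". */
        }
}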
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +org 0x42 +; @pt psb() +; @pt psbend() +; @pt pad() +; @pt ovf() +; @pt stop() +; @pt tnt(tnnnt) +; @pt tnt64(tnntnntnntt) +; @pt tip(3: 0x1000) +; @pt tip.pge(2: 0x2000) +; @pt tip.pgd(1: 0x3000) +; @pt fup(3: 0x4000) +; @pt mode.exec(16bit) +; @pt mode.tsx(begin) +; @pt pip(0xafafaf) +; @pt pip(0xafafaf, nr) +; @pt tsc(12345) +; @pt cbr(244) +; @pt tma(0x257, 0x1cd) +; @pt mtc(0xf0) +; @pt cyc(0x3) +; @pt cyc(0xfa3) +; @pt .exp() +;line1 + +;line3 + + ; line5 trailing space +; @pt .exp(extra) +;a #comment +;b +;c diff --git a/pttc/test/src/test_exp_labels.ptt b/pttc/test/src/test_exp_labels.ptt new file mode 100644 index 0000000..a6d2a16 --- /dev/null +++ b/pttc/test/src/test_exp_labels.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +org 0x4242 + +; @pt p1:psb() +; @pt p2:psbend() +l1: +nop + +l2: +nop + +l_3: nop + +; @pt .exp() +;%l1 # print address of l1 +;(%l1) # print address of l1 +;%l1 %l2 # print address of l1 and l2 +;l1 %l2 # print address of l2 +;%l1 l2 # print address of l1 +;%0l1 # print address of l1 zero padded +;%l2.0 # print zero +;(%l2.0) # print zero +;%l2.1 # print address of l2, only last byte. +;%l2.2 # print address of l2, only last 2 bytes. +;%0l2.2 # print address of l2, only last 2 bytes, zero padded. +;%0l2.3 # print address of l2, last 3 bytes, zero padded. + +;%l_3 # print l_3 + +;%p1 # print packet 1 +;%p2 # print packet 2 + +;%eos # print eos byte offset diff --git a/pttc/test/src/test_label_addr.ptt b/pttc/test/src/test_label_addr.ptt new file mode 100644 index 0000000..0c396e8 --- /dev/null +++ b/pttc/test/src/test_label_addr.ptt @@ -0,0 +1,31 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +bits 64 +org 0x1000 +l1: +nop +l2: diff --git a/ptunit/CMakeLists.txt b/ptunit/CMakeLists.txt new file mode 100644 index 0000000..a298cf1 --- /dev/null +++ b/ptunit/CMakeLists.txt @@ -0,0 +1,43 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +set(PTUNIT_FILES + src/ptunit.c +) + +if (CMAKE_HOST_UNIX) + set(PTUNIT_FILES ${PTUNIT_FILES} src/posix/ptunit_mkfile.c) +endif (CMAKE_HOST_UNIX) + +if (CMAKE_HOST_WIN32) + set(PTUNIT_FILES ${PTUNIT_FILES} src/windows/ptunit_mkfile.c) +endif (CMAKE_HOST_WIN32) + +add_library(ptunit STATIC + ${PTUNIT_FILES} +) + +add_ptunit_c_test(selftest) diff --git a/ptunit/include/ptunit.h b/ptunit/include/ptunit.h new file mode 100644 index 0000000..85904d0 --- /dev/null +++ b/ptunit/include/ptunit.h @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PTUNIT_H +#define PTUNIT_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +/* A source location for reporting unit test fails. */ +struct ptunit_srcloc { + /* The source file. */ + const char *file; + + /* The source line. */ + uint32_t line; +}; + +/* A unit test result type. + * + * This distinguishes the various potential results of a unit test. + */ +enum ptunit_result_type { + /* The test has passed. */ + ptur_passed, + + /* The test has been skipped. */ + ptur_skipped, + + /* The test failed a signed/unsigned integer comparison. */ + ptur_failed_signed_int, + ptur_failed_unsigned_int, + + /* The test failed a pointer comparison. 
*/ + ptur_failed_pointer, + + /* The test failed a string comparison. */ + ptur_failed_str +}; + +/* A unit test result. + * + * We separate test execution and result reporting. A unit test function + * returns a structured result that can later be used for reporting. + */ +struct ptunit_failed_signed_int { + /* The expression that failed. */ + const char *expr; + + /* A string representation of the comparison operation. */ + const char *cmp; + + /* The expected value. */ + int64_t expected; + + /* The actual value. */ + int64_t actual; +}; + +struct ptunit_failed_unsigned_int { + /* The expression that failed. */ + const char *expr; + + /* A string representation of the comparison operation. */ + const char *cmp; + + /* The expected value. */ + uint64_t expected; + + /* The actual value. */ + uint64_t actual; +}; + +struct ptunit_failed_pointer { + /* The expression that failed. */ + const char *expr; + + /* A string representation of the comparison operation. */ + const char *cmp; + + /* The expected value. */ + const void *expected; + + /* The actual value. */ + const void *actual; +}; + +struct ptunit_failed_str { + /* The expression that failed. */ + const char *expr; + + /* A string representation of the comparison operation. */ + const char *cmp; + + /* The expected value. */ + char *expected; + + /* The actual value. */ + char *actual; +}; + +struct ptunit_result { + /* The test result type. */ + enum ptunit_result_type type; + + /* Test result details depending on the result type. */ + struct { + /* The source location of the fail. */ + struct ptunit_srcloc where; + + union { + struct ptunit_failed_signed_int signed_int; + struct ptunit_failed_unsigned_int unsigned_int; + struct ptunit_failed_pointer pointer; + struct ptunit_failed_str str; + } variant; + } failed; +}; + +/* A unit test function. */ +typedef struct ptunit_result (*ptunit_tfun_t)(void); + +/* A unit test. + * + * This is used for logging and reporting. + * + * It is not used for running tests or even for storing tests to be run at a + * later time. + */ +struct ptunit_test { + /* The test name. */ + const char *name; + + /* The optional test arguments. */ + const char *args; + + /* The test result. */ + struct ptunit_result result; +}; + +/* A unit test suite. + * + * This is a simple summary of all tests that have been run. + */ +struct ptunit_suite { + /* An optional suite name. */ + const char *name; + + /* The number of total tests. */ + uint32_t nr_tests; + + /* The number of tests that have been skipped. */ + uint32_t nr_skips; + + /* The number of tests that have failed. */ + uint32_t nr_fails; +}; + +/* Create a unit test source location. */ +extern struct ptunit_srcloc ptunit_mk_srcloc(const char *file, uint32_t line); + +#define ptu_here() ptunit_mk_srcloc(__FILE__, __LINE__) + + +/* Create unit test passed and not run results. */ +extern struct ptunit_result ptunit_mk_passed(void); +extern struct ptunit_result ptunit_mk_skipped(void); + +/* Create a unit test failed signed int result. */ +extern struct ptunit_result ptunit_mk_failed_signed_int(const char *expr, + const char *cmp, + struct ptunit_srcloc, + int64_t actual, + int64_t expected); + +#define ptunit_int_cmp(A, E, C) \ + do { \ + int64_t a = (A), e = (E); \ + \ + if (!(a C e)) \ + return ptunit_mk_failed_signed_int(#A #C #E, #C, \ + ptu_here(), \ + a, e); \ + } while (0) + + +/* Create a unit test failed unsigned int result. 
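/* Illustration, not part of the diff: a hypothetical unit test built on
 * the comparison macros; ptu_int_eq is the wrapper for ptunit_int_cmp
 * defined further below.
 */
static struct ptunit_result example_test(void)
{
        int sum;

        sum = 2 + 2;

        /* on mismatch this returns a ptur_failed_signed_int result that
         * records "sum==4", the actual and expected values, and the
         * source location from ptu_here().
         */
        ptu_int_eq(sum, 4);

        return ptu_passed();
}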
*/ +extern struct ptunit_result ptunit_mk_failed_unsigned_int(const char *expr, + const char *cmp, + struct ptunit_srcloc, + int64_t actual, + int64_t expected); + +#define ptunit_uint_cmp(A, E, C) \ + do { \ + uint64_t a = (A), e = (E); \ + \ + if (!(a C e)) \ + return ptunit_mk_failed_unsigned_int(#A #C #E, #C, \ + ptu_here(), \ + a, e); \ + } while (0) + + +/* Create a unit test failed pointer result. */ +extern struct ptunit_result ptunit_mk_failed_pointer(const char *expr, + const char *cmp, + struct ptunit_srcloc, + const void *actual, + const void *expected); + +#define ptunit_ptr_cmp(A, E, C) \ + do { \ + const void *a = (A), *e = (E); \ + \ + if (!(a C e)) \ + return ptunit_mk_failed_pointer(#A #C #E, #C, \ + ptu_here(), \ + a, e); \ + } while (0) + + +/* Create a unit test failed string result. */ +extern struct ptunit_result ptunit_mk_failed_str(const char *expr, + const char *cmp, + struct ptunit_srcloc, + const char *actual, + const char *expected); + +#define ptunit_str_cmp(A, E, C) \ + do { \ + const char *a = (A), *e = (E); \ + \ + if (!a || !e || !(strcmp(a, e) C 0)) \ + return ptunit_mk_failed_str(#A "~"#C #E, "~"#C, \ + ptu_here(), \ + a, e); \ + } while (0) + + +/* Run a sub-unit test; return on fail. */ + +#define ptunit_subtest(T, ...) \ + do { \ + struct ptunit_result result; \ + \ + result = (T)(__VA_ARGS__); \ + if (result.type != ptur_passed) \ + return result; \ + } while (0) + + +/* Run a sub-unit test; return on fail from here. */ + +#define ptunit_check(T, ...) \ + do { \ + struct ptunit_result result; \ + \ + result = (T)(__VA_ARGS__); \ + if (result.type != ptur_passed) { \ + result.failed.where = ptu_here(); \ + return result; \ + } \ + } while (0) + + +/* Create a unit test. */ +extern struct ptunit_test ptunit_mk_test(const char *name, const char *args); + +/* Destroy a unit test. */ +extern void ptunit_fini_test(struct ptunit_test *); + +/* Create a unit test suite. */ +extern struct ptunit_suite ptunit_mk_suite(int argc, char **argv); + +/* Log a unit test result. + * + * This may also report test fails depending on the configuration. + */ +extern void ptunit_log_test(struct ptunit_suite *, const struct ptunit_test *); + +/* Print a summary report for a unit test suite. */ +extern void ptunit_report(const struct ptunit_suite *); + +/* Run a single simple unit test and log its result. */ + +#define ptunit_run(S, T) \ + do { \ + struct ptunit_test test; \ + \ + test = ptunit_mk_test(#T, NULL); \ + test.result = (T)(); \ + \ + ptunit_log_test(S, &test); \ + ptunit_fini_test(&test); \ + } while (0) + + +/* Run a single parameterized unit test and log its result. */ + +#define ptunit_run_p(S, T, ...) \ + do { \ + struct ptunit_test test; \ + \ + test = ptunit_mk_test(#T, #__VA_ARGS__); \ + test.result = (T)(__VA_ARGS__); \ + \ + ptunit_log_test(S, &test); \ + ptunit_fini_test(&test); \ + } while (0) + + +/* Run a single unit test with fixture and an explict argument list. + * + * The first argument in the argument list is typically the fixture. + */ + +#define ptunit_frun(R, T, F, ...) \ + do { \ + struct ptunit_result *pr = &(R); \ + \ + pr->type = ptur_passed; \ + if ((F)->init) \ + *pr = (F)->init(F); \ + \ + if (pr->type == ptur_passed) { \ + *pr = (T)(__VA_ARGS__); \ + \ + if ((F)->fini) { \ + if (pr->type == ptur_passed) \ + *pr = (F)->fini(F); \ + else \ + (void) (F)->fini(F); \ + } \ + } \ + } while (0) + + +/* Run a single unit test with fixture and log its result. 
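/* Illustration, not part of the diff: a hypothetical test driver tying
 * the pieces together; example_test is the sketch from above.
 */
int main(int argc, char **argv)
{
        struct ptunit_suite suite;

        suite = ptunit_mk_suite(argc, argv);

        ptunit_run(&suite, example_test);   /* or ptu_run(suite, example_test) */

        ptunit_report(&suite);

        return suite.nr_fails ? 1 : 0;
}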
*/ + +#define ptunit_run_f(S, T, F) \ + do { \ + struct ptunit_test test; \ + \ + test = ptunit_mk_test(#T, #F); \ + \ + ptunit_frun(test.result, T, &(F), &(F)); \ + \ + ptunit_log_test(S, &test); \ + ptunit_fini_test(&test); \ + } while (0) + + +/* Run a single parameterized unit test with fixture and log its result. */ + +#define ptunit_run_fp(S, T, F, ...) \ + do { \ + struct ptunit_test test; \ + \ + test = ptunit_mk_test(#T, #F ", " #__VA_ARGS__); \ + \ + ptunit_frun(test.result, T, &(F), &(F), __VA_ARGS__); \ + \ + ptunit_log_test(S, &test); \ + ptunit_fini_test(&test); \ + } while (0) + + + +/* The actual macros to be used in unit tests. + * + * Do not use the above ptunit_ macros directly. + */ + +#define ptu_int_eq(A, E) ptunit_int_cmp(A, E, ==) +#define ptu_int_ne(A, E) ptunit_int_cmp(A, E, !=) +#define ptu_int_gt(A, E) ptunit_int_cmp(A, E, >) +#define ptu_int_ge(A, E) ptunit_int_cmp(A, E, >=) +#define ptu_int_lt(A, E) ptunit_int_cmp(A, E, <) +#define ptu_int_le(A, E) ptunit_int_cmp(A, E, <=) + +#define ptu_uint_eq(A, E) ptunit_uint_cmp(A, E, ==) +#define ptu_uint_ne(A, E) ptunit_uint_cmp(A, E, !=) +#define ptu_uint_gt(A, E) ptunit_uint_cmp(A, E, >) +#define ptu_uint_ge(A, E) ptunit_uint_cmp(A, E, >=) +#define ptu_uint_lt(A, E) ptunit_uint_cmp(A, E, <) +#define ptu_uint_le(A, E) ptunit_uint_cmp(A, E, <=) + +#define ptu_ptr_eq(A, E) ptunit_ptr_cmp(A, E, ==) +#define ptu_ptr_ne(A, E) ptunit_ptr_cmp(A, E, !=) +#define ptu_ptr_gt(A, E) ptunit_ptr_cmp(A, E, >) +#define ptu_ptr_ge(A, E) ptunit_ptr_cmp(A, E, >=) +#define ptu_ptr_lt(A, E) ptunit_ptr_cmp(A, E, <) +#define ptu_ptr_le(A, E) ptunit_ptr_cmp(A, E, <=) +#define ptu_null(A) ptunit_ptr_cmp(A, NULL, ==) +#define ptu_ptr(A) ptunit_ptr_cmp(A, NULL, !=) + +#define ptu_str_eq(A, E) ptunit_str_cmp(A, E, ==) +#define ptu_str_ne(A, E) ptunit_str_cmp(A, E, !=) + +/* Indicate that a unit test passed. */ +#define ptu_passed() ptunit_mk_passed() + +/* Skip a unit test. */ +#define ptu_skipped() ptunit_mk_skipped() + +/* Run a sub-unit test; return on fail. */ +#define ptu_test(T, ...) ptunit_subtest(T, __VA_ARGS__) + +/* Run a sub-unit test; return on fail from here. */ +#define ptu_check(T, ...) ptunit_check(T, __VA_ARGS__) + +/* Run a single unit test. */ +#define ptu_run(S, T) ptunit_run(&(S), T) + +/* Run a single parameterized unit test. */ +#define ptu_run_p(S, T, ...) ptunit_run_p(&(S), T, __VA_ARGS__) + +/* Run a single unit test with fixture. */ +#define ptu_run_f(S, T, F) ptunit_run_f(&(S), T, F) + +/* Run a single parameterized unit test with fixture. */ +#define ptu_run_fp(S, T, F, ...) ptunit_run_fp(&(S), T, F, __VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif /* PTUNIT_H */ diff --git a/ptunit/include/ptunit_mkfile.h b/ptunit/include/ptunit_mkfile.h new file mode 100644 index 0000000..53c237d --- /dev/null +++ b/ptunit/include/ptunit_mkfile.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
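/* Illustration, not part of the diff: a hypothetical fixture for
 * ptu_run_f().  ptunit_frun() calls @init before and @fini after the
 * test, passing the fixture as first argument; a NULL hook is skipped.
 */
struct example_fixture {
        int value;

        struct ptunit_result (*init)(struct example_fixture *);
        struct ptunit_result (*fini)(struct example_fixture *);
};

static struct ptunit_result efix_init(struct example_fixture *efix)
{
        efix->value = 42;
        return ptu_passed();
}

static struct ptunit_result check_value(struct example_fixture *efix)
{
        ptu_int_eq(efix->value, 42);
        return ptu_passed();
}

/* in the driver:
 *     struct example_fixture efix;
 *
 *     efix.init = efix_init;
 *     efix.fini = NULL;
 *     ptu_run_f(suite, check_value, efix);
 */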
+ * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PTUNIT_MKFILE_H +#define PTUNIT_MKFILE_H + +#include + + +/* Create a temporary file for unit testing. + * + * Creates a new file and opens it with @mode. On success, provides the file + * struct and file name in @file and @filename respectively. + * + * The @file needs to be closed and the @filename needs to be freed after use. + * + * Returns zero on success, a negative error code otherwise. + * Returns -pte_internal if @file or @filename is NULL. + * Returns -pte_nomem if @filename can't be allocated. + */ +int ptunit_mkfile(FILE **file, char **filename, const char *mode); + +#endif /* PTUNIT_MKFILE_H */ diff --git a/ptunit/include/ptunit_threads.h b/ptunit/include/ptunit_threads.h new file mode 100644 index 0000000..cb97ec5 --- /dev/null +++ b/ptunit/include/ptunit_threads.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
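/* Illustration, not part of the diff: typical use of ptunit_mkfile() in
 * a unit test; the test body is hypothetical.  As documented above, the
 * caller closes the file and frees the name.
 */
static struct ptunit_result example_tmpfile(void)
{
        char *filename;
        FILE *file;
        int errcode;

        errcode = ptunit_mkfile(&file, &filename, "wb");
        ptu_int_eq(errcode, 0);

        /* ... exercise code that reads or writes @file ... */

        fclose(file);
        free(filename);

        return ptu_passed();
}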
+ */ + +#ifndef PTUNIT_THREADS_H +#define PTUNIT_THREADS_H + +#include "ptunit.h" + +#if defined(FEATURE_THREADS) +# include +#endif /* defined(FEATURE_THREADS) */ + + +/* The maximal number of threads. */ +enum { + ptu_thrd_max = 16 +}; + +/* A test fixture component providing threading support. */ +struct ptunit_thrd_fixture { +#if defined(FEATURE_THREADS) + + /* An array of threads created by ptunit_thrd_create(). */ + thrd_t threads[ptu_thrd_max]; + + /* A lock protecting the outer fixture. We don't need it. */ + mtx_t lock; + +#endif /* defined(FEATURE_THREADS) */ + + /* The actual number of created threads. */ + uint8_t nthreads; + + /* The result of joined threads. */ + int result[ptu_thrd_max]; +}; + + +static inline struct ptunit_result +ptunit_thrd_init(struct ptunit_thrd_fixture *tfix) +{ + ptu_ptr(tfix); + + memset(tfix, 0, sizeof(*tfix)); + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_init(&tfix->lock, mtx_plain); + ptu_int_eq(errcode, thrd_success); + } +#endif /* defined(FEATURE_THREADS) */ + + return ptu_passed(); +} + +static inline struct ptunit_result +ptunit_thrd_fini(struct ptunit_thrd_fixture *tfix) +{ + ptu_ptr(tfix); + +#if defined(FEATURE_THREADS) + { + int thrd, errcode[ptu_thrd_max]; + + for (thrd = 0; thrd < tfix->nthreads; ++thrd) + errcode[thrd] = thrd_join(&tfix->threads[thrd], + &tfix->result[thrd]); + + mtx_destroy(&tfix->lock); + + for (thrd = 0; thrd < tfix->nthreads; ++thrd) + ptu_int_eq(errcode[thrd], thrd_success); + } +#endif /* defined(FEATURE_THREADS) */ + + return ptu_passed(); +} + +#if defined(FEATURE_THREADS) + +static inline struct ptunit_result +ptunit_thrd_create(struct ptunit_thrd_fixture *tfix, int (*worker)(void *), + void *arg) +{ + int errcode; + + ptu_ptr(tfix); + + errcode = thrd_create(&tfix->threads[tfix->nthreads++], worker, arg); + ptu_int_eq(errcode, thrd_success); + + return ptu_passed(); +} + +#endif /* defined(FEATURE_THREADS) */ + +static inline struct ptunit_result +ptunit_thrd_lock(struct ptunit_thrd_fixture *tfix) +{ + ptu_ptr(tfix); + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_lock(&tfix->lock); + ptu_int_eq(errcode, thrd_success); + } +#endif /* defined(FEATURE_THREADS) */ + + return ptu_passed(); +} + +static inline struct ptunit_result +ptunit_thrd_unlock(struct ptunit_thrd_fixture *tfix) +{ + ptu_ptr(tfix); + +#if defined(FEATURE_THREADS) + { + int errcode; + + errcode = mtx_unlock(&tfix->lock); + ptu_int_eq(errcode, thrd_success); + } +#endif /* defined(FEATURE_THREADS) */ + + return ptu_passed(); +} + +#endif /* PTUNIT_THREADS_H */ diff --git a/ptunit/src/posix/ptunit_mkfile.c b/ptunit/src/posix/ptunit_mkfile.c new file mode 100644 index 0000000..a681beb --- /dev/null +++ b/ptunit/src/posix/ptunit_mkfile.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
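/* Illustration, not part of the diff: a hypothetical threaded test
 * built on the fixture above.  Without FEATURE_THREADS the create call
 * is not available and such a test would run single-threaded.
 */
#if defined(FEATURE_THREADS)

static int example_worker(void *arg)
{
        int *counter;

        counter = (int *) arg;
        *counter += 1;

        return 0;
}

static struct ptunit_result
example_threaded(struct ptunit_thrd_fixture *tfix, int *counter)
{
        ptu_test(ptunit_thrd_create, tfix, example_worker, counter);

        /* the threads are joined and their return values collected in
         * ptunit_thrd_fini(), which the enclosing fixture is expected
         * to call from its fini hook.
         */
        return ptu_passed();
}

#endif /* defined(FEATURE_THREADS) */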
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define _POSIX_C_SOURCE 200809L + +#include "ptunit_mkfile.h" + +#include "intel-pt.h" + +#include +#include +#include + + +int ptunit_mkfile(FILE **pfile, char **pfilename, const char *mode) +{ + FILE *file; + const char *tmpdir; + const char *tmpfile; + char template[256], *filename; + int fd, len; + + tmpfile = "ptunit-tmp-XXXXXX"; + tmpdir = getenv("TMP"); + if (!tmpdir || !tmpdir[0]) + tmpdir = "/tmp"; + + len = snprintf(template, sizeof(template), "%s/%s", tmpdir, tmpfile); + if (len < 0) + return -pte_not_supported; + + /* We must not truncate the template. */ + if (sizeof(template) <= (size_t) len) + return -pte_not_supported; + + fd = mkstemp(template); + if (fd == -1) + return -pte_not_supported; + + file = fdopen(fd, mode); + if (!file) { + close(fd); + return -pte_not_supported; + } + + filename = strdup(template); + if (!filename) { + fclose(file); + return -pte_nomem; + } + + *pfile = file; + *pfilename = filename; + + return 0; +} diff --git a/ptunit/src/ptunit.c b/ptunit/src/ptunit.c new file mode 100644 index 0000000..d8091e9 --- /dev/null +++ b/ptunit/src/ptunit.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ptunit.h" + +#include +#include +#include +#include + + +struct ptunit_srcloc ptunit_mk_srcloc(const char *file, uint32_t line) +{ + struct ptunit_srcloc srcloc; + + srcloc.file = file; + srcloc.line = line; + + return srcloc; +} + +struct ptunit_result ptunit_mk_failed_signed_int(const char *expr, + const char *cmp, + struct ptunit_srcloc where, + int64_t actual, + int64_t expected) +{ + struct ptunit_result result; + + result.type = ptur_failed_signed_int; + result.failed.where = where; + result.failed.variant.signed_int.expr = expr; + result.failed.variant.signed_int.cmp = cmp; + result.failed.variant.signed_int.expected = expected; + result.failed.variant.signed_int.actual = actual; + + return result; +} + +struct ptunit_result ptunit_mk_failed_unsigned_int(const char *expr, + const char *cmp, + struct ptunit_srcloc where, + int64_t actual, + int64_t expected) +{ + struct ptunit_result result; + + result.type = ptur_failed_unsigned_int; + result.failed.where = where; + result.failed.variant.unsigned_int.expr = expr; + result.failed.variant.unsigned_int.cmp = cmp; + result.failed.variant.unsigned_int.expected = expected; + result.failed.variant.unsigned_int.actual = actual; + + return result; +} + +struct ptunit_result ptunit_mk_failed_pointer(const char *expr, + const char *cmp, + struct ptunit_srcloc where, + const void *actual, + const void *expected) +{ + struct ptunit_result result; + + result.type = ptur_failed_pointer; + result.failed.where = where; + result.failed.variant.pointer.expr = expr; + result.failed.variant.pointer.cmp = cmp; + result.failed.variant.pointer.expected = expected; + result.failed.variant.pointer.actual = actual; + + return result; +} + +static char *dupstr(const char *str) +{ + char *dup; + size_t len; + + if (!str) + str = "(null)"; + + len = strlen(str); + dup = malloc(len + 1); + if (!dup) + return NULL; + + strncpy(dup, str, len); + dup[len] = 0; + + return dup; +} + +struct ptunit_result ptunit_mk_failed_str(const char *expr, + const char *cmp, + struct ptunit_srcloc where, + const char *actual, + const char *expected) +{ + struct ptunit_result result; + + result.type = ptur_failed_str; + result.failed.where = where; + result.failed.variant.str.expr = expr; + result.failed.variant.str.cmp = cmp; + result.failed.variant.str.expected = dupstr(expected); + result.failed.variant.str.actual = dupstr(actual); + + return result; +} + +struct ptunit_result ptunit_mk_passed(void) +{ + struct ptunit_result result; + + memset(&result, 0, sizeof(result)); + result.type = ptur_passed; + + return result; +} + +struct ptunit_result ptunit_mk_skipped(void) +{ + struct ptunit_result result; + + memset(&result, 0, sizeof(result)); + result.type = ptur_skipped; + + return result; +} + +struct ptunit_test ptunit_mk_test(const char *name, const char *args) +{ + struct ptunit_test test; + + test.name = name; + test.args = args; + test.result = ptunit_mk_skipped(); + + return test; +} + +void ptunit_fini_test(struct ptunit_test *test) +{ + if (!test) + return; + + switch (test->result.type) { + case ptur_skipped: + case ptur_passed: + case ptur_failed_signed_int: + case ptur_failed_unsigned_int: + case ptur_failed_pointer: + break; + + case ptur_failed_str: + free(test->result.failed.variant.str.expected); + free(test->result.failed.variant.str.actual); + break; + } +} + +struct ptunit_suite ptunit_mk_suite(int argc, char **argv) +{ + struct ptunit_suite suite; + + memset(&suite, 0, sizeof(suite)); + + if (argc && argv) + suite.name = argv[0]; + return 
suite; +} + +static void ptunit_print_test(const struct ptunit_test *test) +{ + fprintf(stderr, "%s", test->name); + + if (test->args) + fprintf(stderr, "(%s)", test->args); + + fprintf(stderr, ": "); +} + +static const char *basename(const char *file) +{ + const char *base; + + if (!file) + return NULL; + + for (base = file + strlen(file); base != file; base -= 1) { + char ch; + + ch = base[-1]; + if ((ch == '/') || (ch == '\\')) + break; + } + + return base; +} + +static void ptunit_print_srcloc(const struct ptunit_test *test) +{ + const char *file; + + switch (test->result.type) { + case ptur_passed: + case ptur_skipped: + fprintf(stderr, "n/a: "); + break; + + case ptur_failed_signed_int: + case ptur_failed_unsigned_int: + case ptur_failed_pointer: + case ptur_failed_str: + file = basename(test->result.failed.where.file); + if (!file) + file = ""; + + fprintf(stderr, "%s:%" PRIu32 ": ", file, + test->result.failed.where.line); + break; + } +} + +static void ptunit_report_test(const struct ptunit_test *test) +{ + switch (test->result.type) { + case ptur_skipped: + case ptur_passed: + return; + + case ptur_failed_signed_int: + ptunit_print_test(test); + ptunit_print_srcloc(test); + fprintf(stderr, "%s [%" PRId64 "%s%" PRId64 "] failed.\n", + test->result.failed.variant.signed_int.expr, + test->result.failed.variant.signed_int.actual, + test->result.failed.variant.signed_int.cmp, + test->result.failed.variant.signed_int.expected); + return; + + case ptur_failed_unsigned_int: + ptunit_print_test(test); + ptunit_print_srcloc(test); + fprintf(stderr, "%s [0x%" PRIx64 "%s0x%" PRIx64 "] failed.\n", + test->result.failed.variant.unsigned_int.expr, + test->result.failed.variant.unsigned_int.actual, + test->result.failed.variant.unsigned_int.cmp, + test->result.failed.variant.unsigned_int.expected); + return; + + case ptur_failed_pointer: + ptunit_print_test(test); + ptunit_print_srcloc(test); + fprintf(stderr, "%s [%p%s%p] failed.\n", + test->result.failed.variant.pointer.expr, + test->result.failed.variant.pointer.actual, + test->result.failed.variant.pointer.cmp, + test->result.failed.variant.pointer.expected); + return; + + case ptur_failed_str: + ptunit_print_test(test); + ptunit_print_srcloc(test); + fprintf(stderr, "%s [%s%s%s] failed.\n", + test->result.failed.variant.str.expr, + test->result.failed.variant.str.actual, + test->result.failed.variant.str.cmp, + test->result.failed.variant.str.expected); + return; + } + + ptunit_print_test(test); + fprintf(stderr, "bad result type: 0x%" PRIx32 ".\n", test->result.type); +} + +void ptunit_log_test(struct ptunit_suite *suite, + const struct ptunit_test *test) +{ + if (!test) + return; + + if (suite) { + suite->nr_tests += 1; + + if (test->result.type == ptur_skipped) + suite->nr_skips += 1; + else if (test->result.type != ptur_passed) + suite->nr_fails += 1; + } + + ptunit_report_test(test); +} + +void ptunit_report(const struct ptunit_suite *suite) +{ + if (!suite) + return; + + if (suite->name) + fprintf(stdout, "%s: ", suite->name); + + fprintf(stdout, + "tests: %" PRIu32 ", passes: %" PRIu32 ", fails: %" PRIu32, + suite->nr_tests, + suite->nr_tests - (suite->nr_fails + suite->nr_skips), + suite->nr_fails); + + if (suite->nr_skips) + fprintf(stdout, " (skipped: %" PRIu32 ")", suite->nr_skips); + + fprintf(stdout, "\n"); +} diff --git a/ptunit/src/windows/ptunit_mkfile.c b/ptunit/src/windows/ptunit_mkfile.c new file mode 100644 index 0000000..a938e1b --- /dev/null +++ b/ptunit/src/windows/ptunit_mkfile.c @@ -0,0 +1,72 @@ +/* + * Copyright 
(c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit_mkfile.h" + +#include "intel-pt.h" + +#include +#include + + +int ptunit_mkfile(FILE **pfile, char **pfilename, const char *mode) +{ + char dirbuffer[MAX_PATH], buffer[MAX_PATH], *filename; + const char *dirname; + FILE *file; + DWORD dirlen; + UINT status; + + /* We only support char-based strings. */ + if (sizeof(TCHAR) != sizeof(char)) + return -pte_not_supported; + + dirname = dirbuffer; + dirlen = GetTempPath(sizeof(dirbuffer), dirbuffer); + if (!dirlen || dirlen >= sizeof(dirbuffer)) + dirname = "."; + + status = GetTempFileName(dirname, "ptunit-tmp-", 0, buffer); + if (!status) + return -pte_not_supported; + + file = fopen(buffer, mode); + if (!file) + return -pte_not_supported; + + filename = _strdup(buffer); + if (!filename) { + fclose(file); + return -pte_nomem; + } + + *pfile = file; + *pfilename = filename; + + return 0; +} diff --git a/ptunit/test/src/ptunit-selftest.c b/ptunit/test/src/ptunit-selftest.c new file mode 100644 index 0000000..c562d86 --- /dev/null +++ b/ptunit/test/src/ptunit-selftest.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ptunit.h" + +#include + + +static struct ptunit_result cmp_pass(void) +{ + int zero = 0, one = 1, neg = -1; + const char *szero = "zero", *sone = "one", *null = NULL; + + ptu_int_eq(zero, 0); + ptu_int_ne(zero, one); + ptu_int_lt(neg, 0); + ptu_int_gt(zero, neg); + + ptu_uint_eq(zero, 0); + ptu_uint_ne(zero, one); + ptu_uint_lt(zero, one); + ptu_uint_gt(neg, one); + + ptu_ptr_eq(szero, szero); + ptu_ptr_ne(szero, sone); + ptu_null(null); + ptu_ptr(szero); + + ptu_str_eq(szero, szero); + ptu_str_ne(szero, sone); + + return ptu_passed(); +} + +static struct ptunit_result int_eq_fail(void) +{ + int zero = 0, one = 1; + + ptu_int_eq(zero, one); + + return ptu_skipped(); +} + +static struct ptunit_result int_fail(void) +{ + struct ptunit_result result; + + result = int_eq_fail(); + + ptu_uint_eq(result.type, ptur_failed_signed_int); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.signed_int.expr, "zero==one"); + ptu_str_eq(result.failed.variant.signed_int.cmp, "=="); + ptu_int_eq(result.failed.variant.signed_int.expected, 1); + ptu_int_eq(result.failed.variant.signed_int.actual, 0); + + return ptu_passed(); +} + +static struct ptunit_result uint_eq_fail(void) +{ + uint16_t zero = 0, one = 1; + + ptu_uint_eq(zero, one); + + return ptu_skipped(); +} + +static struct ptunit_result uint_fail(void) +{ + struct ptunit_result result; + + result = uint_eq_fail(); + + ptu_uint_eq(result.type, ptur_failed_unsigned_int); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.unsigned_int.expr, "zero==one"); + ptu_str_eq(result.failed.variant.unsigned_int.cmp, "=="); + ptu_int_eq(result.failed.variant.unsigned_int.expected, 1); + ptu_int_eq(result.failed.variant.unsigned_int.actual, 0); + + return ptu_passed(); +} + +static int i, j, *pi = &i, *null; + +static struct ptunit_result ptr_eq_fail(void) +{ + ptu_ptr_eq(pi, &j); + + return ptu_skipped(); +} + +static struct ptunit_result ptr_fail(void) +{ + struct ptunit_result result; + + result = ptr_eq_fail(); + + ptu_uint_eq(result.type, ptur_failed_pointer); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.pointer.expr, "pi==&j"); + ptu_str_eq(result.failed.variant.pointer.cmp, "=="); + ptu_ptr_eq(result.failed.variant.pointer.expected, &j); + ptu_ptr_eq(result.failed.variant.pointer.actual, &i); + + return ptu_passed(); +} + +static struct ptunit_result ptr_null_fail(void) +{ + ptu_null(pi); + + return ptu_skipped(); +} + +static struct ptunit_result null_fail(void) +{ + struct ptunit_result result; 
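+
+	/* ptu_null() is expected to report like a pointer comparison
+	 * against NULL: the checks below verify the recorded expression
+	 * ("pi==NULL"), the "==" operator, and the expected/actual
+	 * pointer values.
+	 */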
+ + result = ptr_null_fail(); + + ptu_uint_eq(result.type, ptur_failed_pointer); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.pointer.expr, "pi==NULL"); + ptu_str_eq(result.failed.variant.pointer.cmp, "=="); + ptu_ptr_eq(result.failed.variant.pointer.expected, NULL); + ptu_ptr_eq(result.failed.variant.pointer.actual, &i); + + return ptu_passed(); +} + +static struct ptunit_result ptr_check_fail(void) +{ + ptu_ptr(null); + + return ptu_skipped(); +} + +static struct ptunit_result check_fail(void) +{ + struct ptunit_result result; + + result = ptr_check_fail(); + + ptu_uint_eq(result.type, ptur_failed_pointer); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.pointer.expr, "null!=NULL"); + ptu_str_eq(result.failed.variant.pointer.cmp, "!="); + ptu_ptr_eq(result.failed.variant.pointer.expected, NULL); + ptu_ptr_eq(result.failed.variant.pointer.actual, null); + + return ptu_passed(); +} + +/* A unit test fixture providing a unit test struct and cleaning it up. */ +struct test_fixture { + /* A unit test. */ + struct ptunit_test test; + + /* Standard initialization and finalization functions. */ + struct ptunit_result (*init)(struct test_fixture *); + struct ptunit_result (*fini)(struct test_fixture *); +}; + +static struct ptunit_result init_test_fixture(struct test_fixture *tfix) +{ + tfix->test = ptunit_mk_test(NULL, NULL); + + return ptu_passed(); +} + +static struct ptunit_result fini_test_fixture(struct test_fixture *tfix) +{ + ptunit_fini_test(&tfix->test); + + return ptu_passed(); +} + +static const char *sfoo = "foo", *sbar = "bar", *snull; + +static struct ptunit_result str_eq_fail(void) +{ + ptu_str_eq(sfoo, sbar); + + return ptu_skipped(); +} + +static struct ptunit_result str_fail(struct test_fixture *tfix) +{ + struct ptunit_result *result = &tfix->test.result; + + *result = str_eq_fail(); + + ptu_uint_eq(result->type, ptur_failed_str); + ptu_str_eq(result->failed.where.file, __FILE__); + ptu_uint_lt(result->failed.where.line, __LINE__); + ptu_str_eq(result->failed.variant.str.expr, "sfoo~==sbar"); + ptu_str_eq(result->failed.variant.str.cmp, "~=="); + ptu_str_eq(result->failed.variant.str.expected, "bar"); + ptu_str_eq(result->failed.variant.str.actual, "foo"); + + return ptu_passed(); +} + +static struct ptunit_result str_eq_null(void) +{ + ptu_str_eq(snull, sbar); + + return ptu_skipped(); +} + +static struct ptunit_result str_null(struct test_fixture *tfix) +{ + struct ptunit_result *result = &tfix->test.result; + + *result = str_eq_null(); + + ptu_uint_eq(result->type, ptur_failed_str); + ptu_str_eq(result->failed.where.file, __FILE__); + ptu_uint_lt(result->failed.where.line, __LINE__); + ptu_str_eq(result->failed.variant.str.expr, "snull~==sbar"); + ptu_str_eq(result->failed.variant.str.cmp, "~=="); + ptu_str_eq(result->failed.variant.str.expected, "bar"); + ptu_str_eq(result->failed.variant.str.actual, "(null)"); + + return ptu_passed(); +} + +static struct ptunit_result param(int arg_i, int *arg_pi) +{ + ptu_int_eq(arg_i, i); + ptu_ptr_eq(arg_pi, pi); + + return ptu_passed(); +} + +struct fixture { + struct ptunit_result (*fini)(struct fixture *); + uint8_t *pointer; + struct ptunit_result (*init)(struct fixture *); +}; + +static struct ptunit_result init_fixture(struct fixture *pfix) +{ + pfix->pointer = malloc(42); + + return ptu_passed(); +} + +static struct ptunit_result fini_fixture(struct 
fixture *pfix) +{ + free(pfix->pointer); + + return ptu_passed(); +} + +static struct ptunit_result fixture(struct fixture *pfix) +{ + ptu_ptr(pfix); + ptu_ptr(pfix->pointer); + + return ptu_passed(); +} + +static struct ptunit_result fixture_param(struct fixture *pfix, uint8_t *rep) +{ + ptu_ptr(pfix); + ptu_ptr(pfix->pointer); + + free(pfix->pointer); + pfix->pointer = rep; + + return ptu_passed(); +} + +static struct ptunit_result frun_pass(struct fixture *pfix) +{ + (void) pfix; + + return ptu_passed(); +} + +static struct ptunit_result frun_skip(struct fixture *pfix) +{ + (void) pfix; + + return ptu_skipped(); +} + +static struct ptunit_result frun_fail(struct fixture *pfix) +{ + ptu_null(pfix); + + return ptu_passed(); +} + +static struct ptunit_result frun_die(struct fixture *pfix) +{ + (void) pfix; + + *((volatile int *) NULL) = 0; + + return ptu_skipped(); +} + +static struct ptunit_result frun_empty_pass(void) +{ + struct fixture pfix; + struct ptunit_result result; + + pfix.init = NULL; + pfix.fini = NULL; + ptunit_frun(result, frun_pass, &pfix, &pfix); + + ptu_uint_eq(result.type, ptur_passed); + + return ptu_passed(); +} + +static struct ptunit_result frun_init_fail(struct fixture *pfix) +{ + struct ptunit_result result; + + pfix->init = frun_fail; + pfix->fini = frun_skip; + ptunit_frun(result, frun_die, pfix, pfix); + + ptu_uint_eq(result.type, ptur_failed_pointer); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.pointer.expr, "pfix==NULL"); + ptu_str_eq(result.failed.variant.pointer.cmp, "=="); + ptu_ptr_eq(result.failed.variant.pointer.expected, NULL); + ptu_ptr_eq(result.failed.variant.pointer.actual, pfix); + + return ptu_passed(); +} + +static struct ptunit_result frun_init_skip(void) +{ + struct fixture pfix; + struct ptunit_result result; + + pfix.init = frun_skip; + pfix.fini = frun_fail; + ptunit_frun(result, frun_die, &pfix, &pfix); + + ptu_uint_eq(result.type, ptur_skipped); + + return ptu_passed(); +} + +static struct ptunit_result frun_fini_fail(struct fixture *pfix) +{ + struct ptunit_result result; + + pfix->init = NULL; + pfix->fini = frun_fail; + ptunit_frun(result, frun_pass, pfix, pfix); + + ptu_uint_eq(result.type, ptur_failed_pointer); + ptu_str_eq(result.failed.where.file, __FILE__); + ptu_uint_lt(result.failed.where.line, __LINE__); + ptu_str_eq(result.failed.variant.pointer.expr, "pfix==NULL"); + ptu_str_eq(result.failed.variant.pointer.cmp, "=="); + ptu_ptr_eq(result.failed.variant.pointer.expected, NULL); + ptu_ptr_eq(result.failed.variant.pointer.actual, pfix); + + return ptu_passed(); +} + +static struct ptunit_result frun_fini_skip(void) +{ + struct fixture pfix; + struct ptunit_result result; + + pfix.init = NULL; + pfix.fini = frun_skip; + ptunit_frun(result, frun_pass, &pfix, &pfix); + + ptu_uint_eq(result.type, ptur_skipped); + + return ptu_passed(); +} + +static struct ptunit_result frun_fini_preserve(void) +{ + struct fixture pfix; + struct ptunit_result result; + + pfix.init = NULL; + pfix.fini = frun_fail; + ptunit_frun(result, frun_skip, &pfix, &pfix); + + ptu_uint_eq(result.type, ptur_skipped); + + return ptu_passed(); +} + +int main(int argc, char **argv) +{ + struct ptunit_suite suite; + struct test_fixture tfix; + struct fixture pfix; + + suite = ptunit_mk_suite(argc, argv); + + ptu_run(suite, cmp_pass); + ptu_run(suite, int_fail); + ptu_run(suite, uint_fail); + ptu_run(suite, ptr_fail); + ptu_run(suite, null_fail); + ptu_run(suite, check_fail); + 
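+	/* The fixture-based runs below install init/fini hooks: the runner
+	 * is expected to call tfix.init before and tfix.fini after each
+	 * test, as exercised by the frun_* tests in this file.
+	 *
+	 * A new fixture-based test could follow the same pattern
+	 * (hypothetical example, not part of this suite):
+	 *
+	 *   static struct ptunit_result my_test(struct test_fixture *tfix)
+	 *   {
+	 *           ptu_ptr(tfix);
+	 *           return ptu_passed();
+	 *   }
+	 *
+	 *   ptu_run_f(suite, my_test, tfix);
+	 */
+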
+ tfix.init = init_test_fixture; + tfix.fini = fini_test_fixture; + + ptu_run_f(suite, str_fail, tfix); + ptu_run_f(suite, str_null, tfix); + + pfix.pointer = NULL; + pfix.init = init_fixture; + pfix.fini = fini_fixture; + + ptu_run_p(suite, param, i, pi); + ptu_run_f(suite, fixture, pfix); + ptu_run_fp(suite, fixture_param, pfix, NULL); + + ptu_run(suite, frun_empty_pass); + ptu_run(suite, frun_init_skip); + ptu_run(suite, frun_fini_skip); + ptu_run(suite, frun_fini_preserve); + + ptu_run_p(suite, frun_init_fail, &pfix); + ptu_run_p(suite, frun_fini_fail, &pfix); + + ptunit_report(&suite); + return suite.nr_fails; +} diff --git a/ptxed/CMakeLists.txt b/ptxed/CMakeLists.txt new file mode 100644 index 0000000..b81b426 --- /dev/null +++ b/ptxed/CMakeLists.txt @@ -0,0 +1,80 @@ +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +set(XED_INCLUDE "" CACHE PATH "") +set(XED_LIBDIR "" CACHE PATH "") + +option(FEATURE_ELF "Support loading ELF files in ptxed." 
OFF) +if (FEATURE_ELF) + add_definitions(-DFEATURE_ELF) +endif (FEATURE_ELF) + +include_directories( + include + ../libipt/internal/include +) + +include_directories(SYSTEM + ${XED_INCLUDE} +) + +link_directories( + ${XED_LIBDIR} +) + +set(PTXED_FILES + src/ptxed.c + ../libipt/src/pt_cpu.c +) + +if (CMAKE_HOST_UNIX) + set(PTXED_FILES ${PTXED_FILES} ../libipt/src/posix/pt_cpuid.c) +endif (CMAKE_HOST_UNIX) + +if (CMAKE_HOST_WIN32) + set(PTXED_FILES ${PTXED_FILES} ../libipt/src/windows/pt_cpuid.c) +endif (CMAKE_HOST_WIN32) + +if (FEATURE_ELF) + set(PTXED_FILES ${PTXED_FILES} src/load_elf.c) +endif (FEATURE_ELF) + +add_executable(ptxed + ${PTXED_FILES} +) +target_link_libraries(ptxed libipt) +target_link_libraries(ptxed xed) + +if (CMAKE_HOST_WIN32) + # suppress warnings from XED header files + # + # w4127: conditional expression is constant + # w4244: conversion: possible loss of data + # + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244") + +endif (CMAKE_HOST_WIN32) diff --git a/ptxed/include/load_elf.h b/ptxed/include/load_elf.h new file mode 100644 index 0000000..7d63877 --- /dev/null +++ b/ptxed/include/load_elf.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LOAD_ELF_H +#define LOAD_ELF_H + +#include + +struct pt_image_section_cache; +struct pt_image; + + +/* Load an ELF file. + * + * Adds sections for all ELF LOAD segments. + * + * The sections are loaded relative to their virtual addresses specified + * in the ELF program header with the lowest address section loaded at @base. + * + * The name of the program in @prog is used for error reporting. + * If @verbose is non-zero, prints information about loaded sections. + * + * Does not load dependent files. + * Does not support dynamic relocations. + * + * Successfully loaded segments are not unloaded in case of errors. + * + * If @iscache is not NULL, use it to cache image sections. + * + * Returns 0 on success, a negative error code otherwise. 
+ * Returns -pte_invalid if @image or @file are NULL. + * Returns -pte_bad_config if @file can't be processed. + * Returns -pte_nomem if not enough memory can be allocated. + */ +extern int load_elf(struct pt_image_section_cache *iscache, + struct pt_image *image, const char *file, + uint64_t base, const char *prog, int verbose); + +#endif /* LOAD_ELF_H */ diff --git a/ptxed/src/load_elf.c b/ptxed/src/load_elf.c new file mode 100644 index 0000000..17b86c7 --- /dev/null +++ b/ptxed/src/load_elf.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "load_elf.h" + +#include "intel-pt.h" + +#include +#include +#include +#include +#include + + +static int load_section(struct pt_image_section_cache *iscache, + struct pt_image *image, const char *name, + uint64_t offset, uint64_t size, uint64_t vaddr) +{ + if (!iscache) + return pt_image_add_file(image, name, offset, size, NULL, + vaddr); + else { + int isid; + + isid = pt_iscache_add_file(iscache, name, offset, size, vaddr); + if (isid < 0) + return isid; + + return pt_image_add_cached(image, iscache, isid, NULL); + } +} + +static int load_elf32(struct pt_image_section_cache *iscache, + struct pt_image *image, FILE *file, uint64_t base, + const char *name, const char *prog, int verbose) +{ + Elf32_Ehdr ehdr; + Elf32_Half pidx; + int64_t offset; + size_t count; + int errcode, sections; + + errcode = fseek(file, 0, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking ELF header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + count = fread(&ehdr, sizeof(ehdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading ELF header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + errcode = fseek(file, ehdr.e_phoff, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking program header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + /* Determine the load offset. 
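+	 *
+	 * If a non-zero @base is given, all PT_LOAD segments are shifted by
+	 * the same amount so that the segment with the lowest p_vaddr ends
+	 * up at @base:
+	 *
+	 *   offset = base - minaddr
+	 *
+	 * For example (made-up numbers), segments at 0x400000 and 0x600000
+	 * with @base 0x10000000 give offset 0xfc00000 and load at
+	 * 0x10000000 and 0x10200000, respectively.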
*/ + if (!base) + offset = 0; + else { + uint64_t minaddr; + + minaddr = UINT64_MAX; + + for (pidx = 0; pidx < ehdr.e_phnum; ++pidx) { + Elf32_Phdr phdr; + + count = fread(&phdr, sizeof(phdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading " + "phdr %u: %s.\n", + prog, name, pidx, strerror(errno)); + return -pte_bad_config; + } + + if (phdr.p_type != PT_LOAD) + continue; + + if (phdr.p_vaddr < minaddr) + minaddr = phdr.p_vaddr; + } + + offset = base - minaddr; + } + + errcode = fseek(file, ehdr.e_phoff, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking program header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + for (sections = 0, pidx = 0; pidx < ehdr.e_phnum; ++pidx) { + Elf32_Phdr phdr; + + count = fread(&phdr, sizeof(phdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading phdr %u: %s.\n", + prog, name, pidx, strerror(errno)); + return -pte_bad_config; + } + + if (phdr.p_type != PT_LOAD) + continue; + + if (!phdr.p_filesz) + continue; + + errcode = load_section(iscache, image, name, phdr.p_offset, + phdr.p_filesz, phdr.p_vaddr + offset); + if (errcode < 0) { + fprintf(stderr, "%s: warning: %s: failed to create " + "section for phdr %u: %s.\n", prog, name, pidx, + pt_errstr(pt_errcode(errcode))); + continue; + } + + sections += 1; + + if (verbose) { + printf("%s: phdr %u [%s]", prog, pidx, name); + printf(" offset=0x%" PRIx32, phdr.p_offset); + printf(" size=0x%" PRIx32, phdr.p_filesz); + printf(" vaddr=0x%" PRIx32, phdr.p_vaddr); + printf(".\n"); + } + } + + if (!sections) + fprintf(stderr, + "%s: warning: %s: did not find any load sections.\n", + prog, name); + + return 0; +} + +static int load_elf64(struct pt_image_section_cache *iscache, + struct pt_image *image, FILE *file, uint64_t base, + const char *name, const char *prog, int verbose) +{ + Elf64_Ehdr ehdr; + Elf64_Half pidx; + int64_t offset; + size_t count; + int errcode, sections; + + errcode = fseek(file, 0, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking ELF header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + count = fread(&ehdr, sizeof(ehdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading ELF header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + errcode = fseek(file, ehdr.e_phoff, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking program header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + /* Determine the load offset. 
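+	 *
+	 * Same computation as in load_elf32() above: shift all PT_LOAD
+	 * segments by (base - minaddr) so the lowest segment lands at @base.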
*/ + if (!base) + offset = 0; + else { + uint64_t minaddr; + + minaddr = UINT64_MAX; + + for (pidx = 0; pidx < ehdr.e_phnum; ++pidx) { + Elf64_Phdr phdr; + + count = fread(&phdr, sizeof(phdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading " + "phdr %u: %s.\n", + prog, name, pidx, strerror(errno)); + return -pte_bad_config; + } + + if (phdr.p_type != PT_LOAD) + continue; + + if (phdr.p_vaddr < minaddr) + minaddr = phdr.p_vaddr; + } + + offset = base - minaddr; + } + + errcode = fseek(file, ehdr.e_phoff, SEEK_SET); + if (errcode) { + fprintf(stderr, + "%s: warning: %s error seeking program header: %s.\n", + prog, name, strerror(errno)); + return -pte_bad_config; + } + + for (sections = 0, pidx = 0; pidx < ehdr.e_phnum; ++pidx) { + Elf64_Phdr phdr; + + count = fread(&phdr, sizeof(phdr), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s error reading phdr %u: %s.\n", + prog, name, pidx, strerror(errno)); + return -pte_bad_config; + } + + if (phdr.p_type != PT_LOAD) + continue; + + if (!phdr.p_filesz) + continue; + + errcode = load_section(iscache, image, name, phdr.p_offset, + phdr.p_filesz, phdr.p_vaddr + offset); + if (errcode < 0) { + fprintf(stderr, "%s: warning: %s: failed to create " + "section for phdr %u: %s.\n", prog, name, pidx, + pt_errstr(pt_errcode(errcode))); + continue; + } + + sections += 1; + + if (verbose) { + printf("%s: phdr %u [%s]", prog, pidx, name); + printf(" offset=0x%" PRIx64, phdr.p_offset); + printf(" size=0x%" PRIx64, phdr.p_filesz); + printf(" vaddr=0x%" PRIx64, phdr.p_vaddr); + printf(".\n"); + } + } + + if (!sections) + fprintf(stderr, + "%s: warning: %s: did not find any load sections.\n", + prog, name); + + return 0; +} + +int load_elf(struct pt_image_section_cache *iscache, struct pt_image *image, + const char *name, uint64_t base, const char *prog, int verbose) +{ + uint8_t e_ident[EI_NIDENT]; + FILE *file; + size_t count; + int errcode, idx; + + if (!image || !name) + return -pte_invalid; + + file = fopen(name, "rb"); + if (!file) { + fprintf(stderr, "%s: warning: failed to open %s: %s.\n", prog, + name, strerror(errno)); + return -pte_bad_config; + } + + count = fread(e_ident, sizeof(e_ident), 1, file); + if (count != 1) { + fprintf(stderr, + "%s: warning: %s failed to read file header: %s.\n", + prog, name, strerror(errno)); + + errcode = -pte_bad_config; + goto out; + } + + for (idx = 0; idx < SELFMAG; ++idx) { + if (e_ident[idx] != ELFMAG[idx]) { + fprintf(stderr, + "%s: warning: ignoring %s: not an ELF file.\n", + prog, name); + + errcode = -pte_bad_config; + goto out; + } + } + + switch (e_ident[EI_CLASS]) { + default: + fprintf(stderr, "%s: unsupported ELF class: %d\n", + prog, e_ident[EI_CLASS]); + errcode = -pte_bad_config; + break; + + case ELFCLASS32: + errcode = load_elf32(iscache, image, file, base, name, prog, + verbose); + break; + + case ELFCLASS64: + errcode = load_elf64(iscache, image, file, base, name, prog, + verbose); + break; + } + +out: + fclose(file); + return errcode; +} diff --git a/ptxed/src/ptxed.c b/ptxed/src/ptxed.c new file mode 100644 index 0000000..9d57b1a --- /dev/null +++ b/ptxed/src/ptxed.c @@ -0,0 +1,1822 @@ +/* + * Copyright (c) 2013-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#if defined(FEATURE_ELF) +# include "load_elf.h" +#endif /* defined(FEATURE_ELF) */ + +#include "pt_cpu.h" + +#include "intel-pt.h" + +#include +#include +#include +#include +#include + +#include + + +/* The type of decoder to be used. */ +enum ptxed_decoder_type { + pdt_insn_decoder, + pdt_block_decoder +}; + +/* The decoder to use. */ +struct ptxed_decoder { + /* The decoder type. */ + enum ptxed_decoder_type type; + + /* The actual decoder. */ + union { + /* If @type == pdt_insn_decoder */ + struct pt_insn_decoder *insn; + + /* If @type == pdt_block_decoder */ + struct pt_block_decoder *block; + } variant; +}; + +/* A collection of options. */ +struct ptxed_options { + /* Do not print the instruction. */ + uint32_t dont_print_insn:1; + + /* Remain as quiet as possible - excluding error messages. */ + uint32_t quiet:1; + + /* Print statistics (overrides quiet). */ + uint32_t print_stats:1; + + /* Print information about section loads and unloads. */ + uint32_t track_image:1; + + /* Track blocks in the output. + * + * This only applies to the block decoder. + */ + uint32_t track_blocks:1; + + /* Print in AT&T format. */ + uint32_t att_format:1; + + /* Print the offset into the trace file. */ + uint32_t print_offset:1; + + /* Print the current timestamp. */ + uint32_t print_time:1; + + /* Print the raw bytes for an insn. */ + uint32_t print_raw_insn:1; + + /* Perform checks. */ + uint32_t check:1; +}; + +/* A collection of flags selecting which stats to collect/print. */ +enum ptxed_stats_flag { + /* Collect number of instructions. */ + ptxed_stat_insn = (1 << 0), + + /* Collect number of blocks. */ + ptxed_stat_blocks = (1 << 1) +}; + +/* A collection of statistics. */ +struct ptxed_stats { + /* The number of instructions. */ + uint64_t insn; + + /* The number of blocks. + * + * This only applies to the block decoder. + */ + uint64_t blocks; + + /* A collection of flags saying which statistics to collect/print. */ + uint32_t flags; +}; + +static int ptxed_have_decoder(const struct ptxed_decoder *decoder) +{ + /* It suffices to check for one decoder in the variant union. 
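+	 *
+	 * Both variant members are pointers occupying the same storage, so
+	 * checking @insn for NULL also covers the block decoder case.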
*/ + return decoder && decoder->variant.insn; +} + +static void ptxed_free_decoder(struct ptxed_decoder *decoder) +{ + if (!decoder) + return; + + switch (decoder->type) { + case pdt_insn_decoder: + pt_insn_free_decoder(decoder->variant.insn); + break; + + case pdt_block_decoder: + pt_blk_free_decoder(decoder->variant.block); + break; + } +} + +static void version(const char *name) +{ + struct pt_version v = pt_library_version(); + + printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n", + name, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD, + PT_VERSION_EXT, v.major, v.minor, v.build, v.ext); +} + +static void help(const char *name) +{ + printf("usage: %s []\n\n", name); + printf("options:\n"); + printf(" --help|-h this text.\n"); + printf(" --version display version information and exit.\n"); + printf(" --att print instructions in att format.\n"); + printf(" --no-inst do not print instructions (only addresses).\n"); + printf(" --quiet|-q do not print anything (except errors).\n"); + printf(" --offset print the offset into the trace file.\n"); + printf(" --time print the current timestamp.\n"); + printf(" --raw-insn print the raw bytes of each instruction.\n"); + printf(" --check perform checks (expensive).\n"); + printf(" --stat print statistics (even when quiet).\n"); + printf(" collects all statistics unless one or more are selected.\n"); + printf(" --stat:insn collect number of instructions.\n"); + printf(" --verbose|-v print various information (even when quiet).\n"); + printf(" --pt [:[-]] load the processor trace data from .\n"); + printf(" an optional offset or range can be given.\n"); +#if defined(FEATURE_ELF) + printf(" --elf <[:] load an ELF from at address .\n"); + printf(" use the default load address if is omitted.\n"); +#endif /* defined(FEATURE_ELF) */ + printf(" --raw [:[-]]: load a raw binary from at address .\n"); + printf(" an optional offset or range can be given.\n"); + printf(" --cpu none|auto|f/m[/s] set cpu to the given value and decode according to:\n"); + printf(" none spec (default)\n"); + printf(" auto current cpu\n"); + printf(" f/m[/s] family/model[/stepping]\n"); + printf(" --mtc-freq set the MTC frequency (IA32_RTIT_CTL[17:14]) to .\n"); + printf(" --nom-freq set the nominal frequency (MSR_PLATFORM_INFO[15:8]) to .\n"); + printf(" --cpuid-0x15.eax set the value of cpuid[0x15].eax.\n"); + printf(" --cpuid-0x15.ebx set the value of cpuid[0x15].ebx.\n"); + printf(" --insn-decoder use the instruction flow decoder (default).\n"); + printf(" --block-decoder use the block decoder.\n"); + printf(" --block:show-blocks show blocks in the output.\n"); + printf(" --block:end-on-call set the end-on-call block decoder flag.\n"); + printf("\n"); +#if defined(FEATURE_ELF) + printf("You must specify at least one binary or ELF file (--raw|--elf).\n"); +#else /* defined(FEATURE_ELF) */ + printf("You must specify at least one binary file (--raw).\n"); +#endif /* defined(FEATURE_ELF) */ + printf("You must specify exactly one processor trace file (--pt).\n"); +} + +static int extract_base(char *arg, uint64_t *base) +{ + char *sep, *rest; + + sep = strrchr(arg, ':'); + if (sep) { + uint64_t num; + + if (!sep[1]) + return 0; + + errno = 0; + num = strtoull(sep+1, &rest, 0); + if (errno || *rest) + return 0; + + *base = num; + *sep = 0; + return 1; + } + + return 0; +} + +static int parse_range(const char *arg, uint64_t *begin, uint64_t *end) +{ + char *rest; + + if (!arg || !*arg) + return 0; + + errno = 0; + *begin = strtoull(arg, &rest, 0); + if (errno) + 
return -1; + + if (!*rest) + return 1; + + if (*rest != '-') + return -1; + + *end = strtoull(rest+1, &rest, 0); + if (errno || *rest) + return -1; + + return 2; +} + +/* Preprocess a filename argument. + * + * A filename may optionally be followed by a file offset or a file range + * argument separated by ':'. Split the original argument into the filename + * part and the offset/range part. + * + * If no end address is specified, set @size to zero. + * If no offset is specified, set @offset to zero. + * + * Returns zero on success, a negative error code otherwise. + */ +static int preprocess_filename(char *filename, uint64_t *offset, uint64_t *size) +{ + uint64_t begin, end; + char *range; + int parts; + + if (!filename || !offset || !size) + return -pte_internal; + + /* Search from the end as the filename may also contain ':'. */ + range = strrchr(filename, ':'); + if (!range) { + *offset = 0ull; + *size = 0ull; + + return 0; + } + + /* Let's try to parse an optional range suffix. + * + * If we can, remove it from the filename argument. + * If we can not, assume that the ':' is part of the filename, e.g. a + * drive letter on Windows. + */ + parts = parse_range(range + 1, &begin, &end); + if (parts <= 0) { + *offset = 0ull; + *size = 0ull; + + return 0; + } + + if (parts == 1) { + *offset = begin; + *size = 0ull; + + *range = 0; + + return 0; + } + + if (parts == 2) { + if (end <= begin) + return -pte_invalid; + + *offset = begin; + *size = end - begin; + + *range = 0; + + return 0; + } + + return -pte_internal; +} + +static int load_file(uint8_t **buffer, size_t *psize, const char *filename, + uint64_t offset, uint64_t size, const char *prog) +{ + uint8_t *content; + size_t read; + FILE *file; + long fsize, begin, end; + int errcode; + + if (!buffer || !psize || !filename || !prog) { + fprintf(stderr, "%s: internal error.\n", prog ? 
prog : ""); + return -1; + } + + errno = 0; + file = fopen(filename, "rb"); + if (!file) { + fprintf(stderr, "%s: failed to open %s: %d.\n", + prog, filename, errno); + return -1; + } + + errcode = fseek(file, 0, SEEK_END); + if (errcode) { + fprintf(stderr, "%s: failed to determine size of %s: %d.\n", + prog, filename, errno); + goto err_file; + } + + fsize = ftell(file); + if (fsize < 0) { + fprintf(stderr, "%s: failed to determine size of %s: %d.\n", + prog, filename, errno); + goto err_file; + } + + begin = (long) offset; + if (((uint64_t) begin != offset) || (fsize <= begin)) { + fprintf(stderr, + "%s: bad offset 0x%" PRIx64 " into %s.\n", + prog, offset, filename); + goto err_file; + } + + end = fsize; + if (size) { + uint64_t range_end; + + range_end = offset + size; + if ((uint64_t) end < range_end) { + fprintf(stderr, + "%s: bad range 0x%" PRIx64 " in %s.\n", + prog, range_end, filename); + goto err_file; + } + + end = (long) range_end; + } + + fsize = end - begin; + + content = malloc(fsize); + if (!content) { + fprintf(stderr, "%s: failed to allocated memory %s.\n", + prog, filename); + goto err_file; + } + + errcode = fseek(file, begin, SEEK_SET); + if (errcode) { + fprintf(stderr, "%s: failed to load %s: %d.\n", + prog, filename, errno); + goto err_content; + } + + read = fread(content, fsize, 1, file); + if (read != 1) { + fprintf(stderr, "%s: failed to load %s: %d.\n", + prog, filename, errno); + goto err_content; + } + + fclose(file); + + *buffer = content; + *psize = fsize; + + return 0; + +err_content: + free(content); + +err_file: + fclose(file); + return -1; +} + +static int load_pt(struct pt_config *config, char *arg, const char *prog) +{ + uint64_t foffset, fsize; + uint8_t *buffer; + size_t size; + int errcode; + + errcode = preprocess_filename(arg, &foffset, &fsize); + if (errcode < 0) { + fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg, + pt_errstr(pt_errcode(errcode))); + return -1; + } + + errcode = load_file(&buffer, &size, arg, foffset, fsize, prog); + if (errcode < 0) + return errcode; + + config->begin = buffer; + config->end = buffer + size; + + return 0; +} + +static int load_raw(struct pt_image_section_cache *iscache, + struct pt_image *image, char *arg, const char *prog) +{ + uint64_t base, foffset, fsize; + int isid, errcode, has_base; + + has_base = extract_base(arg, &base); + if (has_base <= 0) + return -1; + + errcode = preprocess_filename(arg, &foffset, &fsize); + if (errcode < 0) { + fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg, + pt_errstr(pt_errcode(errcode))); + return -1; + } + + if (!fsize) + fsize = UINT64_MAX; + + isid = pt_iscache_add_file(iscache, arg, foffset, fsize, base); + if (isid < 0) { + fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n", + prog, arg, base, pt_errstr(pt_errcode(isid))); + return -1; + } + + errcode = pt_image_add_cached(image, iscache, isid, NULL); + if (errcode < 0) { + fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n", + prog, arg, base, pt_errstr(pt_errcode(errcode))); + return -1; + } + + return 0; +} + +static xed_machine_mode_enum_t translate_mode(enum pt_exec_mode mode) +{ + switch (mode) { + case ptem_unknown: + return XED_MACHINE_MODE_INVALID; + + case ptem_16bit: + return XED_MACHINE_MODE_LEGACY_16; + + case ptem_32bit: + return XED_MACHINE_MODE_LEGACY_32; + + case ptem_64bit: + return XED_MACHINE_MODE_LONG_64; + } + + return XED_MACHINE_MODE_INVALID; +} + +static const char *visualize_iclass(enum pt_insn_class iclass) +{ + switch (iclass) { + case ptic_error: + return 
"unknown/error"; + + case ptic_other: + return "other"; + + case ptic_call: + return "near call"; + + case ptic_return: + return "near return"; + + case ptic_jump: + return "near jump"; + + case ptic_cond_jump: + return "cond jump"; + + case ptic_far_call: + return "far call"; + + case ptic_far_return: + return "far return"; + + case ptic_far_jump: + return "far jump"; + } + + return "undefined"; +} + +static void check_insn_iclass(const xed_inst_t *inst, + const struct pt_insn *insn, uint64_t offset) +{ + xed_category_enum_t category; + xed_iclass_enum_t iclass; + + if (!inst || !insn) { + printf("[internal error]\n"); + return; + } + + category = xed_inst_category(inst); + iclass = xed_inst_iclass(inst); + + switch (insn->iclass) { + case ptic_error: + break; + + case ptic_other: + switch (category) { + default: + return; + + case XED_CATEGORY_CALL: + case XED_CATEGORY_RET: + case XED_CATEGORY_COND_BR: + case XED_CATEGORY_UNCOND_BR: + case XED_CATEGORY_INTERRUPT: + case XED_CATEGORY_SYSCALL: + case XED_CATEGORY_SYSRET: + break; + } + break; + + case ptic_call: + if (iclass == XED_ICLASS_CALL_NEAR) + return; + + break; + + case ptic_return: + if (iclass == XED_ICLASS_RET_NEAR) + return; + + break; + + case ptic_jump: + if (iclass == XED_ICLASS_JMP) + return; + + break; + + case ptic_cond_jump: + if (category == XED_CATEGORY_COND_BR) + return; + + break; + + case ptic_far_call: + switch (iclass) { + default: + break; + + case XED_ICLASS_CALL_FAR: + case XED_ICLASS_INT: + case XED_ICLASS_INT1: + case XED_ICLASS_INT3: + case XED_ICLASS_INTO: + case XED_ICLASS_SYSCALL: + case XED_ICLASS_SYSCALL_AMD: + case XED_ICLASS_SYSENTER: + case XED_ICLASS_VMCALL: + return; + } + break; + + case ptic_far_return: + switch (iclass) { + default: + break; + + case XED_ICLASS_RET_FAR: + case XED_ICLASS_IRET: + case XED_ICLASS_IRETD: + case XED_ICLASS_IRETQ: + case XED_ICLASS_SYSRET: + case XED_ICLASS_SYSRET_AMD: + case XED_ICLASS_SYSEXIT: + case XED_ICLASS_VMLAUNCH: + case XED_ICLASS_VMRESUME: + return; + } + break; + + case ptic_far_jump: + if (iclass == XED_ICLASS_JMP_FAR) + return; + + break; + } + + /* If we get here, @insn->iclass doesn't match XED's classification. */ + printf("[%" PRIx64 ", %" PRIx64 ": iclass error: iclass: %s, " + "xed iclass: %s, category: %s]\n", offset, insn->ip, + visualize_iclass(insn->iclass), xed_iclass_enum_t2str(iclass), + xed_category_enum_t2str(category)); + +} + +static void check_insn_decode(xed_decoded_inst_t *inst, + const struct pt_insn *insn, uint64_t offset) +{ + xed_error_enum_t errcode; + + if (!inst || !insn) { + printf("[internal error]\n"); + return; + } + + xed_decoded_inst_set_mode(inst, translate_mode(insn->mode), + XED_ADDRESS_WIDTH_INVALID); + + /* Decode the instruction (again). + * + * We may have decoded the instruction already for printing. In this + * case, we will decode it twice. + * + * The more common use-case, however, is to check the instruction class + * while not printing instructions since the latter is too expensive for + * regular use with long traces. 
+ */ + errcode = xed_decode(inst, insn->raw, insn->size); + if (errcode != XED_ERROR_NONE) { + printf("[%" PRIx64 ", %" PRIx64 ": xed error: (%u) %s]\n", + offset, insn->ip, errcode, + xed_error_enum_t2str(errcode)); + return; + } + + if (!xed_decoded_inst_valid(inst)) { + printf("[%" PRIx64 ", %" PRIx64 ": xed error: " + "invalid instruction]\n", offset, insn->ip); + return; + } +} + +static void check_insn(const struct pt_insn *insn, uint64_t offset) +{ + xed_decoded_inst_t inst; + + xed_decoded_inst_zero(&inst); + check_insn_decode(&inst, insn, offset); + + /* We need a valid instruction in order to do further checks. + * + * Invalid instructions have already been diagnosed. + */ + if (!xed_decoded_inst_valid(&inst)) + return; + + check_insn_iclass(xed_decoded_inst_inst(&inst), insn, offset); +} + +static void xed_print_insn(const xed_decoded_inst_t *inst, uint64_t ip, + const struct ptxed_options *options) +{ + xed_print_info_t pi; + char buffer[256]; + xed_bool_t ok; + + if (!inst || !options) { + printf(" [internal error]"); + return; + } + + if (options->print_raw_insn) { + xed_uint_t length, i; + + length = xed_decoded_inst_get_length(inst); + for (i = 0; i < length; ++i) + printf(" %02x", xed_decoded_inst_get_byte(inst, i)); + + for (; i < pt_max_insn_size; ++i) + printf(" "); + } + + xed_init_print_info(&pi); + pi.p = inst; + pi.buf = buffer; + pi.blen = sizeof(buffer); + pi.runtime_address = ip; + + if (options->att_format) + pi.syntax = XED_SYNTAX_ATT; + + ok = xed_format_generic(&pi); + if (!ok) { + printf(" [xed print error]"); + return; + } + + printf(" %s", buffer); +} + +static void print_insn(const struct pt_insn *insn, xed_state_t *xed, + const struct ptxed_options *options, uint64_t offset, + uint64_t time) +{ + if (!insn || !options) { + printf("[internal error]\n"); + return; + } + + if (insn->resynced) + printf("[overflow]\n"); + + if (insn->enabled) + printf("[enabled]\n"); + + if (insn->resumed) + printf("[resumed]\n"); + + if (insn->speculative) + printf("? 
"); + + if (options->print_offset) + printf("%016" PRIx64 " ", offset); + + if (options->print_time) + printf("%016" PRIx64 " ", time); + + printf("%016" PRIx64, insn->ip); + + if (!options->dont_print_insn) { + xed_machine_mode_enum_t mode; + xed_decoded_inst_t inst; + xed_error_enum_t errcode; + + mode = translate_mode(insn->mode); + + xed_state_set_machine_mode(xed, mode); + xed_decoded_inst_zero_set_mode(&inst, xed); + + errcode = xed_decode(&inst, insn->raw, insn->size); + switch (errcode) { + case XED_ERROR_NONE: + xed_print_insn(&inst, insn->ip, options); + break; + + default: + printf(" [xed decode error: (%u) %s]", errcode, + xed_error_enum_t2str(errcode)); + break; + } + } + + printf("\n"); + + if (insn->interrupted) + printf("[interrupt]\n"); + + if (insn->aborted) + printf("[aborted]\n"); + + if (insn->committed) + printf("[committed]\n"); + + if (insn->disabled) + printf("[disabled]\n"); + + if (insn->stopped) + printf("[stopped]\n"); +} + +static void diagnose_insn(const char *errtype, struct pt_insn_decoder *decoder, + struct pt_insn *insn, int errcode) +{ + int err; + uint64_t pos; + + err = pt_insn_get_offset(decoder, &pos); + if (err < 0) { + printf("could not determine offset: %s\n", + pt_errstr(pt_errcode(err))); + printf("[?, %" PRIx64 ": %s: %s]\n", insn->ip, errtype, + pt_errstr(pt_errcode(errcode))); + } else + printf("[%" PRIx64 ", %" PRIx64 ": %s: %s]\n", pos, + insn->ip, errtype, pt_errstr(pt_errcode(errcode))); +} + +static void decode_insn(struct pt_insn_decoder *decoder, + const struct ptxed_options *options, + struct ptxed_stats *stats) +{ + xed_state_t xed; + uint64_t offset, sync, time; + + if (!options) { + printf("[internal error]\n"); + return; + } + + xed_state_zero(&xed); + + offset = 0ull; + sync = 0ull; + time = 0ull; + for (;;) { + struct pt_insn insn; + int errcode; + + /* Initialize the IP - we use it for error reporting. */ + insn.ip = 0ull; + + errcode = pt_insn_sync_forward(decoder); + if (errcode < 0) { + uint64_t new_sync; + + if (errcode == -pte_eos) + break; + + diagnose_insn("sync error", decoder, &insn, errcode); + + /* Let's see if we made any progress. If we haven't, + * we likely never will. Bail out. + * + * We intentionally report the error twice to indicate + * that we tried to re-sync. Maybe it even changed. + */ + errcode = pt_insn_get_offset(decoder, &new_sync); + if (errcode < 0 || (new_sync <= sync)) + break; + + sync = new_sync; + continue; + } + + for (;;) { + if (options->print_offset || options->check) { + errcode = pt_insn_get_offset(decoder, &offset); + if (errcode < 0) + break; + } + + if (options->print_time) { + errcode = pt_insn_time(decoder, &time, NULL, + NULL); + if (errcode < 0) + break; + } + + errcode = pt_insn_next(decoder, &insn, sizeof(insn)); + if (errcode < 0) { + /* Even in case of errors, we may have succeeded + * in decoding the current instruction. + */ + if (insn.iclass != ptic_error) { + if (!options->quiet) + print_insn(&insn, &xed, options, + offset, time); + if (stats) + stats->insn += 1; + + if (options->check) + check_insn(&insn, offset); + } + break; + } + + if (!options->quiet) + print_insn(&insn, &xed, options, offset, time); + + if (stats) + stats->insn += 1; + + if (options->check) + check_insn(&insn, offset); + + if (errcode & pts_eos) { + if (!insn.disabled && !options->quiet) + printf("[end of trace]\n"); + + errcode = -pte_eos; + break; + } + } + + /* We shouldn't break out of the loop without an error. 
*/ + if (!errcode) + errcode = -pte_internal; + + /* We're done when we reach the end of the trace stream. */ + if (errcode == -pte_eos) + break; + + diagnose_insn("error", decoder, &insn, errcode); + } +} + +static int xed_next_ip(uint64_t *pip, const xed_decoded_inst_t *inst, + uint64_t ip) +{ + xed_uint_t length, disp_width; + + if (!pip || !inst) + return -pte_internal; + + length = xed_decoded_inst_get_length(inst); + if (!length) { + printf("[xed error: failed to determine instruction length]\n"); + return -pte_bad_insn; + } + + ip += length; + + /* If it got a branch displacement it must be a branch. + * + * This includes conditional branches for which we don't know whether + * they were taken. The next IP won't be used in this case as a + * conditional branch ends a block. The next block will start with the + * correct IP. + */ + disp_width = xed_decoded_inst_get_branch_displacement_width(inst); + if (disp_width) + ip += xed_decoded_inst_get_branch_displacement(inst); + + *pip = ip; + return 0; +} + +static int block_fetch_insn(struct pt_insn *insn, const struct pt_block *block, + uint64_t ip, struct pt_image_section_cache *iscache) +{ + if (!insn || !block) + return -pte_internal; + + /* We can't read from an empty block. */ + if (!block->ninsn) + return -pte_invalid; + + memset(insn, 0, sizeof(*insn)); + insn->mode = block->mode; + insn->ip = ip; + + /* The last instruction in a block may be truncated. */ + if ((ip == block->end_ip) && block->truncated) { + if (!block->size || (sizeof(insn->raw) < (size_t) block->size)) + return -pte_bad_insn; + + insn->size = block->size; + memcpy(insn->raw, block->raw, insn->size); + } else { + int size; + + size = pt_iscache_read(iscache, insn->raw, sizeof(insn->raw), + block->isid, ip); + if (size < 0) + return size; + + insn->isid = block->isid; + insn->size = (uint8_t) size; + } + + return 0; +} + +static void diagnose_block_at(const char *errtype, int errcode, + struct pt_block_decoder *decoder, uint64_t ip) +{ + uint64_t pos; + int err; + + err = pt_blk_get_offset(decoder, &pos); + if (err < 0) { + printf("[could not determine offset: %s]\n", + pt_errstr(pt_errcode(err))); + + printf("[?, %" PRIx64 ": %s: %s]\n", ip, errtype, + pt_errstr(pt_errcode(errcode))); + } else + printf("[%" PRIx64 ", %" PRIx64 ": %s: %s]\n", pos, ip, + errtype, pt_errstr(pt_errcode(errcode))); +} + +static void diagnose_block(const char *errtype, int errcode, + const struct pt_block *block, + struct pt_block_decoder *decoder, + struct pt_image_section_cache *iscache) +{ + uint64_t ip; + int err; + + if (!block) { + printf("ptxed: internal error"); + return; + } + + /* Determine the IP at which to report the error. + * + * Depending on the type of error, the IP varies between that of the + * last instruction in @block or the next instruction outside of @block. + * + * When the block is empty, we use the IP of the block itself, + * i.e. where the first instruction should have been. + */ + if (!block->ninsn) + ip = block->ip; + else { + ip = block->end_ip; + + switch (errcode) { + case -pte_nomap: + case -pte_bad_insn: { + struct pt_insn insn; + xed_decoded_inst_t inst; + xed_error_enum_t xederr; + + /* Decode failed when trying to fetch or decode the next + * instruction. Since indirect or conditional branches + * end a block and don't cause an additional fetch, we + * should be able to reach that IP from the last + * instruction in @block. + * + * We ignore errors and fall back to the IP of the last + * instruction. 
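+			 *
+			 * On success, xed_next_ip() below advances @ip past
+			 * the last instruction, including any branch
+			 * displacement, which is where the failing fetch or
+			 * decode happened.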
+ */ + err = block_fetch_insn(&insn, block, ip, iscache); + if (err < 0) + break; + + xed_decoded_inst_zero(&inst); + xed_decoded_inst_set_mode(&inst, + translate_mode(insn.mode), + XED_ADDRESS_WIDTH_INVALID); + + xederr = xed_decode(&inst, insn.raw, insn.size); + if (xederr != XED_ERROR_NONE) + break; + + (void) xed_next_ip(&ip, &inst, insn.ip); + } + break; + + default: + break; + } + } + + diagnose_block_at(errtype, errcode, decoder, ip); +} + +static void print_block(const struct pt_block *block, + struct pt_block_decoder *decoder, + struct pt_image_section_cache *iscache, + const struct ptxed_options *options, + const struct ptxed_stats *stats, + uint64_t offset, uint64_t time) +{ + xed_machine_mode_enum_t mode; + xed_state_t xed; + uint64_t ip; + uint16_t ninsn; + + if (!block || !options) { + printf("[internal error]\n"); + return; + } + + if (block->resynced) + printf("[overflow]\n"); + + if (block->enabled) + printf("[enabled]\n"); + + if (block->resumed) + printf("[resumed]\n"); + + if (options->track_blocks) { + printf("[block"); + if (stats) + printf(" %" PRIx64, stats->blocks); + printf("]\n"); + } + + mode = translate_mode(block->mode); + xed_state_init2(&xed, mode, XED_ADDRESS_WIDTH_INVALID); + + ip = block->ip; + ninsn = block->ninsn; + for (;;) { + struct pt_insn insn; + xed_decoded_inst_t inst; + xed_error_enum_t xederrcode; + int errcode; + + if (block->speculative) + printf("? "); + + if (options->print_offset) + printf("%016" PRIx64 " ", offset); + + if (options->print_time) + printf("%016" PRIx64 " ", time); + + printf("%016" PRIx64, ip); + + errcode = block_fetch_insn(&insn, block, ip, iscache); + if (errcode < 0) { + printf(" [fetch error: %s]\n", + pt_errstr(pt_errcode(errcode))); + break; + } + + xed_decoded_inst_zero_set_mode(&inst, &xed); + + xederrcode = xed_decode(&inst, insn.raw, insn.size); + if (xederrcode != XED_ERROR_NONE) { + printf(" [xed decode error: (%u) %s]\n", xederrcode, + xed_error_enum_t2str(xederrcode)); + break; + } + + if (!options->dont_print_insn) + xed_print_insn(&inst, insn.ip, options); + + printf("\n"); + + ninsn -= 1; + if (!ninsn) + break; + + errcode = xed_next_ip(&ip, &inst, ip); + if (errcode < 0) { + diagnose_block_at("reconstruct error", errcode, + decoder, ip); + break; + } + } + + /* Decode should have brought us to @block->end_ip. */ + if (ip != block->end_ip) + diagnose_block_at("reconstruct error", -pte_nosync, decoder, + ip); + + if (block->interrupted) + printf("[interrupt]\n"); + + if (block->aborted) + printf("[aborted]\n"); + + if (block->committed) + printf("[committed]\n"); + + if (block->disabled) + printf("[disabled]\n"); + + if (block->stopped) + printf("[stopped]\n"); +} + +static void check_block(const struct pt_block *block, + struct pt_image_section_cache *iscache, + uint64_t offset) +{ + struct pt_insn insn; + xed_decoded_inst_t inst; + uint64_t ip; + uint16_t ninsn; + int errcode; + + if (!block) { + printf("[internal error]\n"); + return; + } + + /* There's nothing to check for an empty block. */ + ninsn = block->ninsn; + if (!ninsn) + return; + + ip = block->ip; + do { + errcode = block_fetch_insn(&insn, block, ip, iscache); + if (errcode < 0) { + printf("[%" PRIx64 ", %" PRIx64 ": fetch error: %s]\n", + offset, ip, pt_errstr(pt_errcode(errcode))); + return; + } + + xed_decoded_inst_zero(&inst); + check_insn_decode(&inst, &insn, offset); + + /* We need a valid instruction in order to do further checks. + * + * Invalid instructions have already been diagnosed. 
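+		 *
+		 * check_insn_decode() already printed the offset and IP of
+		 * the bad instruction, so we stop checking this block
+		 * without reporting it again.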
+ */ + if (!xed_decoded_inst_valid(&inst)) + return; + + errcode = xed_next_ip(&ip, &inst, ip); + if (errcode < 0) { + printf("[%" PRIx64 ", %" PRIx64 ": error: %s]\n", + offset, ip, pt_errstr(pt_errcode(errcode))); + return; + } + } while (--ninsn); + + /* We reached the end of the block. Both @insn and @inst refer to the + * last instruction in @block. + * + * Check that we reached the end IP of the block. + */ + if (insn.ip != block->end_ip) { + printf("[%" PRIx64 ", %" PRIx64 ": error: did not reach end: %" + PRIx64 "]\n", offset, insn.ip, block->end_ip); + } + + /* Check the last instruction's classification, if available. */ + insn.iclass = block->iclass; + if (insn.iclass) + check_insn_iclass(xed_decoded_inst_inst(&inst), &insn, offset); +} + +static void decode_block(struct pt_block_decoder *decoder, + struct pt_image_section_cache *iscache, + const struct ptxed_options *options, + struct ptxed_stats *stats) +{ + uint64_t offset, sync, time; + + if (!options) { + printf("[internal error]\n"); + return; + } + + offset = 0ull; + sync = 0ull; + time = 0ull; + for (;;) { + struct pt_block block; + int errcode; + + /* Initialize IP and ninsn - we use it for error reporting. */ + block.ip = 0ull; + block.ninsn = 0u; + + errcode = pt_blk_sync_forward(decoder); + if (errcode < 0) { + uint64_t new_sync; + + if (errcode == -pte_eos) + break; + + diagnose_block("sync error", errcode, &block, decoder, + iscache); + + /* Let's see if we made any progress. If we haven't, + * we likely never will. Bail out. + * + * We intentionally report the error twice to indicate + * that we tried to re-sync. Maybe it even changed. + */ + errcode = pt_blk_get_offset(decoder, &new_sync); + if (errcode < 0 || (new_sync <= sync)) + break; + + sync = new_sync; + continue; + } + + for (;;) { + if (options->print_offset || options->check) { + errcode = pt_blk_get_offset(decoder, &offset); + if (errcode < 0) + break; + } + + if (options->print_time) { + errcode = pt_blk_time(decoder, &time, NULL, + NULL); + if (errcode < 0) + break; + } + + errcode = pt_blk_next(decoder, &block, sizeof(block)); + if (errcode < 0) { + /* Even in case of errors, we may have succeeded + * in decoding some instructions. + */ + if (block.ninsn) { + if (stats) { + stats->insn += block.ninsn; + stats->blocks += 1; + } + + if (!options->quiet) + print_block(&block, decoder, + iscache, options, + stats, offset, + time); + + if (options->check) + check_block(&block, iscache, + offset); + } + break; + } + + if (stats) { + stats->insn += block.ninsn; + stats->blocks += 1; + } + + if (!options->quiet) + print_block(&block, decoder, iscache, options, + stats, offset, time); + + if (options->check) + check_block(&block, iscache, offset); + + if (errcode & pts_eos) { + if (!block.disabled && !options->quiet) + printf("[end of trace]\n"); + + errcode = -pte_eos; + break; + } + } + + /* We shouldn't break out of the loop without an error. */ + if (!errcode) + errcode = -pte_internal; + + /* We're done when we reach the end of the trace stream. 
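+		 *
+		 * Any other error is diagnosed below and we go back to the
+		 * outer loop to re-synchronize at the next synchronization
+		 * point.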
*/ + if (errcode == -pte_eos) + break; + + diagnose_block("error", errcode, &block, decoder, iscache); + } +} + +static void decode(struct ptxed_decoder *decoder, + struct pt_image_section_cache *iscache, + const struct ptxed_options *options, + struct ptxed_stats *stats) +{ + if (!decoder) { + printf("[internal error]\n"); + return; + } + + switch (decoder->type) { + case pdt_insn_decoder: + decode_insn(decoder->variant.insn, options, stats); + break; + + case pdt_block_decoder: + decode_block(decoder->variant.block, iscache, options, stats); + break; + } +} + +static void print_stats(struct ptxed_stats *stats) +{ + if (!stats) { + printf("[internal error]\n"); + return; + } + + if (stats->flags & ptxed_stat_insn) + printf("insn: %" PRIu64 ".\n", stats->insn); + + if (stats->flags & ptxed_stat_blocks) + printf("blocks:\t%" PRIu64 ".\n", stats->blocks); +} + +static int get_arg_uint64(uint64_t *value, const char *option, const char *arg, + const char *prog) +{ + char *rest; + + if (!value || !option || !prog) { + fprintf(stderr, "%s: internal error.\n", prog ? prog : "?"); + return 0; + } + + if (!arg || (arg[0] == '-' && arg[1] == '-')) { + fprintf(stderr, "%s: %s: missing argument.\n", prog, option); + return 0; + } + + errno = 0; + *value = strtoull(arg, &rest, 0); + if (errno || *rest) { + fprintf(stderr, "%s: %s: bad argument: %s.\n", prog, option, + arg); + return 0; + } + + return 1; +} + +static int get_arg_uint32(uint32_t *value, const char *option, const char *arg, + const char *prog) +{ + uint64_t val; + + if (!get_arg_uint64(&val, option, arg, prog)) + return 0; + + if (val > UINT32_MAX) { + fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, + arg); + return 0; + } + + *value = (uint32_t) val; + + return 1; +} + +static int get_arg_uint8(uint8_t *value, const char *option, const char *arg, + const char *prog) +{ + uint64_t val; + + if (!get_arg_uint64(&val, option, arg, prog)) + return 0; + + if (val > UINT8_MAX) { + fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, + arg); + return 0; + } + + *value = (uint8_t) val; + + return 1; +} + +extern int main(int argc, char *argv[]) +{ + struct pt_image_section_cache *iscache; + struct ptxed_decoder decoder; + struct ptxed_options options; + struct ptxed_stats stats; + struct pt_config config; + struct pt_image *image; + const char *prog; + int errcode, i; + + if (!argc) { + help(""); + return 1; + } + + prog = argv[0]; + iscache = NULL; + image = NULL; + + memset(&decoder, 0, sizeof(decoder)); + decoder.type = pdt_block_decoder; + + memset(&options, 0, sizeof(options)); + memset(&stats, 0, sizeof(stats)); + + pt_config_init(&config); + + iscache = pt_iscache_alloc(NULL); + if (!iscache) { + fprintf(stderr, + "%s: failed to allocate image section cache.\n", prog); + goto err; + } + + image = pt_image_alloc(NULL); + if (!image) { + fprintf(stderr, "%s: failed to allocate image.\n", prog); + goto err; + } + + for (i = 1; i < argc;) { + char *arg; + + arg = argv[i++]; + + if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) { + help(prog); + goto out; + } + if (strcmp(arg, "--version") == 0) { + version(prog); + goto out; + } + if (strcmp(arg, "--pt") == 0) { + if (argc <= i) { + fprintf(stderr, + "%s: --pt: missing argument.\n", prog); + goto out; + } + arg = argv[i++]; + + if (ptxed_have_decoder(&decoder)) { + fprintf(stderr, + "%s: duplicate pt sources: %s.\n", + prog, arg); + goto err; + } + + errcode = pt_cpu_errata(&config.errata, &config.cpu); + if (errcode < 0) + goto err; + + errcode = 
load_pt(&config, arg, prog); + if (errcode < 0) + goto err; + + switch (decoder.type) { + case pdt_insn_decoder: + decoder.variant.insn = + pt_insn_alloc_decoder(&config); + if (!decoder.variant.insn) { + fprintf(stderr, "%s: failed to create " + "decoder.\n", prog); + goto err; + } + + errcode = + pt_insn_set_image(decoder.variant.insn, + image); + if (errcode < 0) { + fprintf(stderr, + "%s: failed to set image.\n", + prog); + goto err; + } + break; + + case pdt_block_decoder: + decoder.variant.block = + pt_blk_alloc_decoder(&config); + if (!decoder.variant.block) { + fprintf(stderr, "%s: failed to create " + "decoder.\n", prog); + goto err; + } + + errcode = + pt_blk_set_image(decoder.variant.block, + image); + if (errcode < 0) { + fprintf(stderr, + "%s: failed to set image.\n", + prog); + goto err; + } + break; + } + + continue; + } + if (strcmp(arg, "--raw") == 0) { + if (argc <= i) { + fprintf(stderr, + "%s: --raw: missing argument.\n", prog); + goto out; + } + arg = argv[i++]; + + errcode = load_raw(iscache, image, arg, prog); + if (errcode < 0) + goto err; + + continue; + } +#if defined(FEATURE_ELF) + if (strcmp(arg, "--elf") == 0) { + uint64_t base; + + if (argc <= i) { + fprintf(stderr, + "%s: --elf: missing argument.\n", prog); + goto out; + } + arg = argv[i++]; + base = 0ull; + errcode = extract_base(arg, &base); + if (errcode < 0) + goto err; + + errcode = load_elf(iscache, image, arg, base, prog, + options.track_image); + if (errcode < 0) + goto err; + + continue; + } +#endif /* defined(FEATURE_ELF) */ + if (strcmp(arg, "--att") == 0) { + options.att_format = 1; + continue; + } + if (strcmp(arg, "--no-inst") == 0) { + options.dont_print_insn = 1; + continue; + } + if (strcmp(arg, "--quiet") == 0 || strcmp(arg, "-q") == 0) { + options.quiet = 1; + continue; + } + if (strcmp(arg, "--offset") == 0) { + options.print_offset = 1; + continue; + } + if (strcmp(arg, "--time") == 0) { + options.print_time = 1; + continue; + } + if (strcmp(arg, "--raw-insn") == 0) { + options.print_raw_insn = 1; + continue; + } + if (strcmp(arg, "--check") == 0) { + options.check = 1; + continue; + } + if (strcmp(arg, "--stat") == 0) { + options.print_stats = 1; + continue; + } + if (strcmp(arg, "--stat:insn") == 0) { + stats.flags |= ptxed_stat_insn; + continue; + } + if (strcmp(arg, "--stat:blocks") == 0) { + stats.flags |= ptxed_stat_blocks; + continue; + } + if (strcmp(arg, "--cpu") == 0) { + /* override cpu information before the decoder + * is initialized. 
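+			 *
+			 * The argument is either 'auto' to use the cpu we are
+			 * running on, 'none' to clear the cpu information, or
+			 * a family/model[/stepping] triple as accepted by
+			 * pt_cpu_parse().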
+ */ + if (ptxed_have_decoder(&decoder)) { + fprintf(stderr, + "%s: please specify cpu before the pt source file.\n", + prog); + goto err; + } + if (argc <= i) { + fprintf(stderr, + "%s: --cpu: missing argument.\n", prog); + goto out; + } + arg = argv[i++]; + + if (strcmp(arg, "auto") == 0) { + errcode = pt_cpu_read(&config.cpu); + if (errcode < 0) { + fprintf(stderr, + "%s: error reading cpu: %s.\n", + prog, + pt_errstr(pt_errcode(errcode))); + return 1; + } + continue; + } + + if (strcmp(arg, "none") == 0) { + memset(&config.cpu, 0, sizeof(config.cpu)); + continue; + } + + errcode = pt_cpu_parse(&config.cpu, arg); + if (errcode < 0) { + fprintf(stderr, + "%s: cpu must be specified as f/m[/s]\n", + prog); + goto err; + } + continue; + } + if (strcmp(arg, "--mtc-freq") == 0) { + if (!get_arg_uint8(&config.mtc_freq, "--mtc-freq", + argv[i++], prog)) + goto err; + + continue; + } + if (strcmp(arg, "--nom-freq") == 0) { + if (!get_arg_uint8(&config.nom_freq, "--nom-freq", + argv[i++], prog)) + goto err; + + continue; + } + if (strcmp(arg, "--cpuid-0x15.eax") == 0) { + if (!get_arg_uint32(&config.cpuid_0x15_eax, + "--cpuid-0x15.eax", argv[i++], + prog)) + goto err; + + continue; + } + if (strcmp(arg, "--cpuid-0x15.ebx") == 0) { + if (!get_arg_uint32(&config.cpuid_0x15_ebx, + "--cpuid-0x15.ebx", argv[i++], + prog)) + goto err; + + continue; + } + if (strcmp(arg, "--verbose") == 0 || strcmp(arg, "-v") == 0) { + options.track_image = 1; + continue; + } + + if (strcmp(arg, "--insn-decoder") == 0) { + if (ptxed_have_decoder(&decoder)) { + fprintf(stderr, + "%s: please specify %s before the pt " + "source file.\n", arg, prog); + goto err; + } + + decoder.type = pdt_insn_decoder; + continue; + } + + if (strcmp(arg, "--block-decoder") == 0) { + if (ptxed_have_decoder(&decoder)) { + fprintf(stderr, + "%s: please specify %s before the pt " + "source file.\n", arg, prog); + goto err; + } + + decoder.type = pdt_block_decoder; + continue; + } + + if (strcmp(arg, "--block:show-blocks") == 0) { + options.track_blocks = 1; + continue; + } + + if (strcmp(arg, "--block:end-on-call") == 0) { + config.flags.variant.block.end_on_call = 1; + continue; + } + + fprintf(stderr, "%s: unknown option: %s.\n", prog, arg); + goto err; + } + + if (!ptxed_have_decoder(&decoder)) { + fprintf(stderr, "%s: no pt file.\n", prog); + goto err; + } + + xed_tables_init(); + + /* If we didn't select any statistics, select them all depending on the + * decoder type. + */ + if (options.print_stats && !stats.flags) { + stats.flags |= ptxed_stat_insn; + + if (decoder.type == pdt_block_decoder) + stats.flags |= ptxed_stat_blocks; + } + + decode(&decoder, iscache, &options, + options.print_stats ? &stats : NULL); + + if (options.print_stats) + print_stats(&stats); + +out: + ptxed_free_decoder(&decoder); + pt_image_free(image); + pt_iscache_free(iscache); + free(config.begin); + return 0; + +err: + ptxed_free_decoder(&decoder); + pt_image_free(image); + pt_iscache_free(iscache); + free(config.begin); + return 1; +} diff --git a/script/perf-read-aux.bash b/script/perf-read-aux.bash new file mode 100755 index 0000000..f79c5e9 --- /dev/null +++ b/script/perf-read-aux.bash @@ -0,0 +1,110 @@ +#! /bin/bash +# +# Copyright (c) 2015-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +set -e + +prog=`basename $0` + +usage() { + cat <] + +Extract the raw AUX area from a perf data file. + +options: + -h this text + -d print commands, don't execute them + + defaults to perf.data. +EOF +} + +dry_run=0 +while getopts "hd" opt; do + case $opt in + h) + usage + exit 0 + ;; + d) + dry_run=1 + ;; + esac +done + +shift $(($OPTIND-1)) + + +if [[ $# == 0 ]]; then + file="perf.data" +elif [[ $# == 1 ]]; then + file="$1" + shift +else + usage + exit 1 +fi + + +if [[ "$dry_run" == 0 ]]; then + nofiles=0 + + for ofile in $file-aux-idx*.bin; do + if [[ -w $ofile ]]; then + echo "$prog: $ofile is in the way." + nofiles+=1 + fi + done + + if [[ "$nofiles" > 0 ]]; then + exit 1 + fi +fi + + +perf script --no-itrace -i "$file" -D | gawk -F' ' -- ' + /PERF_RECORD_AUXTRACE / { + offset = strtonum($1) + hsize = strtonum(substr($2, 2)) + size = strtonum($5) + idx = strtonum($11) + + ofile = sprintf("%s-aux-idx%d.bin", file, idx) + begin = offset + hsize + + cmd = sprintf("dd if=%s of=%s conv=notrunc oflag=append ibs=1 skip=%d " \ + "count=%d status=none", file, ofile, begin, size) + + if (dry_run != 0) { + print cmd + } + else { + system(cmd) + } + } +' file="$file" dry_run="$dry_run" diff --git a/script/perf-read-image.bash b/script/perf-read-image.bash new file mode 100755 index 0000000..d67d51e --- /dev/null +++ b/script/perf-read-image.bash @@ -0,0 +1,84 @@ +#! /bin/bash +# +# Copyright (c) 2015-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +set -e + +prog=`basename $0` + +usage() { + cat <] + +Scan the perf data file for MMAP records and print ptxed options for +constructing a corresponding image. + + defaults to perf.data. +EOF +} + + +if [[ $# == 0 ]]; then + file="perf.data" +elif [[ $# == 1 ]]; then + file="$1" + shift +else + usage + exit 1 +fi + + +perf script --no-itrace -i "$file" -D | gawk -F' ' -- ' + function handle_mmap(file, vaddr) { + if (match(file, /\[.*\]/) != 0) { + # ignore 'virtual' file names like [kallsyms] + } + else if (match(file, /\.ko$/) != 0) { + # ignore kernel objects + # + # use /proc/kcore + } + else { + printf(" --elf %s:0x%x", file, vaddr) + } + } + + /PERF_RECORD_MMAP / { + vaddr = strtonum(substr($5, 2)) + file = $9 + + handle_mmap(file, vaddr) + } + + /PERF_RECORD_MMAP2 / { + vaddr = strtonum(substr($5, 2)) + file = $12 + + handle_mmap(file, vaddr) + } +' diff --git a/script/test.bash b/script/test.bash new file mode 100755 index 0000000..28d7ba2 --- /dev/null +++ b/script/test.bash @@ -0,0 +1,246 @@ +#! /bin/bash +# +# Copyright (c) 2013-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# This script executes ptt tests and compares the output of tools, like +# ptxed or ptdump, with the expected output from the ptt testfile. 
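+#
+# For each .ptt file, pttc generates the trace (.pt), the binary (.bin),
+# the assembly listing (.lst), and one .exp file per tool and cpu.  The
+# respective tool is then run to produce a matching .out file; a .diff
+# file is kept only if expected and actual output differ.
+#
+# Example (assuming pttc, ptdump, and ptxed are in PATH):
+#
+#   script/test.bash -c 6/61 test/src/bdm64-tip-xabort.ptt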
+ +info() { + [[ $verbose != 0 ]] && echo -e "$@" >&2 +} + +run() { + info "$@" + "$@" +} + +asm2addr() { + local line + line=`grep -i ^org "$1"` + [[ $? != 0 ]] && return $? + echo $line | sed "s/org *//" +} + +usage() { + cat <] ... + +options: + -h this text + -v print commands as they are executed + -c cpu[,cpu] comma-separated list of cpu's for the tests (see pttc -h, for valid values) + -f exit with 1 if any of the tests failed + -l only list .diff files + -g specify the pttc command (default: pttc) + -G specify additional arguments to pttc + -d specify the ptdump command (default: ptdump) + -D specify additional arguments to ptdump + -x specify the ptxed command (default: ptxed) + -X specify additional arguments to ptxed + + annotated yasm file ending in .ptt +EOF +} + +pttc_cmd=pttc +pttc_arg="" +ptdump_cmd=ptdump +ptdump_arg="" +ptxed_cmd=ptxed +ptxed_arg="" +exit_fails=0 +list=0 +verbose=0 +while getopts "hvc:flg:G:d:D:x:X:" option; do + case $option in + h) + usage + exit 0 + ;; + v) + verbose=1 + ;; + c) + cpus=`echo $OPTARG | sed "s/,/ /g"` + ;; + f) + exit_fails=1 + ;; + l) + list=1 + ;; + g) + pttc_cmd=$OPTARG + ;; + G) + pttc_arg=$OPTARG + ;; + d) + ptdump_cmd=$OPTARG + ;; + D) + ptdump_arg=$OPTARG + ;; + x) + ptxed_cmd=$OPTARG + ;; + X) + ptxed_arg=$OPTARG + ;; + \?) + exit 1 + ;; + esac +done + +shift $(($OPTIND-1)) + +if [[ $# == 0 ]]; then + usage + exit 1 +fi + +# the exit status +status=0 + +ptt-ptdump-opts() { + sed -n 's/[ \t]*;[ \t]*opt:ptdump[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1" +} + +ptt-ptxed-opts() { + sed -n 's/[ \t]*;[ \t]*opt:ptxed[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1" +} + +run-ptt-test() { + info "\n# run-ptt-test $@" + + ptt="$1" + cpu="$2" + base=`basename "${ptt%%.ptt}"` + + if [[ -n "$cpu" ]]; then + cpu="--cpu $cpu" + fi + + # the following are the files that are generated by pttc + pt=$base.pt + bin=$base.bin + lst=$base.lst + + + # execute pttc - remove the extra \r in Windows line endings + exps=`run "$pttc_cmd" $pttc_arg $cpu "$ptt" | sed 's/\r\n/\n/g'` + ret=$? + if [[ $ret != 0 ]]; then + echo "$ptt: $pttc_cmd $pttc_arg failed with $ret" >&2 + status=1 + return + elif [[ -z $exps ]]; then + echo "$ptt: $pttc_cmd $pttc_arg did not produce any .exp file" >&2 + status=1 + return + fi + + # loop over all .exp files determine the tool, generate .out + # files and compare .exp and .out file with diff. + # all differences will be + for exp in $exps; do + exp_base=${exp%%.exp} + out=$exp_base.out + diff=$exp_base.diff + tool=${exp_base##$base-} + tool=${tool%%-cpu_*} + case $tool in + ptxed) + addr=`asm2addr "$ptt"` + if [[ $? != 0 ]]; then + echo "$ptt: org directive not found in test file" >&2 + status=1 + continue + fi + local opts=`ptt-ptxed-opts "$ptt"` + opts+=" --no-inst --check" + run "$ptxed_cmd" $ptxed_arg --raw $bin:$addr $cpu $opts --pt $pt > $out + ;; + ptdump) + local opts=`ptt-ptdump-opts "$ptt"` + run "$ptdump_cmd" $ptdump_arg $cpu $opts $pt > $out + ;; + *) + echo "$ptt: unknown tool '$tool'" + status=1 + continue + ;; + esac + if run diff -ub $exp $out > $diff; then + run rm $diff + else + if [[ $exit_fails != 0 ]]; then + status=1 + fi + + if [[ $list != 0 ]]; then + echo $diff + else + cat $diff + fi + fi + done +} + +ptt-cpus() { + sed -n 's/[ \t]*;[ \t]*cpu[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1" +} + +run-ptt-tests() { + local ptt="$1" + local cpus=$cpus + + # if no cpus are given on the command-line, + # use the cpu directives from the pttfile. 
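+	# a cpu directive is a comment of the form '; cpu 6/61' in the
+	# .ptt file; ptt-cpus() extracts everything after the cpu keyword.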
+ if [[ -z $cpus ]]; then + cpus=`ptt-cpus "$ptt"` + fi + + # if there are no cpu directives in the pttfile, + # run the test without any cpu settings. + if [[ -z $cpus ]]; then + run-ptt-test "$ptt" + return + fi + + # otherwise run for each cpu the test. + for i in $cpus; do + run-ptt-test "$ptt" $i + done +} + +for ptt in "$@"; do + run-ptt-tests "$ptt" +done + +exit $status diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 0000000..a0c35b5 --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,59 @@ +# Copyright (c) 2015-2017, Intel Corporation +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +function(add_ptt_test name) + set(pttc $) + set(ptdump $) + set(ptxed $) + set(script ${BASH} ${CMAKE_SOURCE_DIR}/script/test.bash) + set(test ${CMAKE_SOURCE_DIR}/test/src/${name}) + + add_test( + NAME insn-${name} + COMMAND ${script} -f -g ${pttc} -d ${ptdump} -x ${ptxed} -X --insn-decoder ${test} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ptt-insn + ) + + add_test( + NAME block-${name} + COMMAND ${script} -f -g ${pttc} -d ${ptdump} -x ${ptxed} -X --block-decoder ${test} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ptt-block + ) +endfunction(add_ptt_test) + +file(GLOB TESTS + LIST_DIRECTORIES false + RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}/src/ + src/*.ptt +) + +file(MAKE_DIRECTORY + ${CMAKE_CURRENT_BINARY_DIR}/ptt-insn + ${CMAKE_CURRENT_BINARY_DIR}/ptt-block +) +foreach (test ${TESTS}) + add_ptt_test(${test}) +endforeach () diff --git a/test/src/bdm64-tip-xabort.ptt b/test/src/bdm64-tip-xabort.ptt new file mode 100644 index 0000000..c878b51 --- /dev/null +++ b/test/src/bdm64-tip-xabort.ptt @@ -0,0 +1,97 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be +; Recorded Following a Transactional Abort. +; +; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) +; may result in a transactional abort. If an abort occurs immediately +; following a branch instruction, an incorrect branch target may be +; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace +; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the +; abort. +; +; cpu 6/61 +; cpu 6/71 +; cpu 6/79 +; cpu 6/86 +; +; Variant: indirect branch. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: mode.tsx(begin) +; @pt p4: fup(3: %l1) +; @pt p5: psbend() + +l1: jmp [rax] +l2: hlt + +; @pt p6: tip(1: %l3) +; +; The branch destination is bad. + +l3: hlt +l4: hlt + +; We immediately take an xabort from there +; +; @pt p7: mode.tsx(abort) +; @pt p8: fup(1: %l3) +; @pt p9: tip(1: %l5) + +l5: nop + +; @pt p10: fup(1: %l6) +; @pt p11: tip.pgd(0: %l7) +l6: nop +l7: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 mode.tsx intx +;%0p4 fup 3: %0l1 +;%0p5 psbend +;%0p6 tip 1: %?l3.2 +;%0p7 mode.tsx abrt +;%0p8 fup 1: %?l3.2 +;%0p9 tip 1: %?l5.2 +;%0p10 fup 1: %?l6.2 +;%0p11 tip.pgd 0: %?l7.0 + + +; @pt .exp(ptxed) +;? %0l1 # jmp [rax] +;[interrupt] +;[aborted] +;%0l5 # nop +;[disabled] diff --git a/test/src/bdm64-tnt-cond-xabort.ptt b/test/src/bdm64-tnt-cond-xabort.ptt new file mode 100644 index 0000000..0c5f574 --- /dev/null +++ b/test/src/bdm64-tnt-cond-xabort.ptt @@ -0,0 +1,107 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be +; Recorded Following a Transactional Abort. +; +; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) +; may result in a transactional abort. If an abort occurs immediately +; following a branch instruction, an incorrect branch target may be +; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace +; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the +; abort. +; +; cpu 6/61 +; cpu 6/71 +; cpu 6/79 +; cpu 6/86 +; +; Variant: conditional branch followed by another conditional branch to +; tell us that we're on the wrong track. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: mode.tsx(begin) +; @pt p4: fup(3: %l1) +; @pt p5: psbend() + +l1: test [rax], rbx +l2: je l5 + +; @pt p6: tnt(n) +; +; The branch destination is bad. +; +; There is no way for us to know this as long as we can reach the bad +; branch destination without further Intel PT support. +; +; If we cannot reach it, however, we know that the branch was bad. +; +l3: nop +l4: je l9 + +l5: hlt +l6: hlt + +; We immediately take an xabort from there +; +; @pt p7: mode.tsx(abort) +; @pt p8: fup(1: %l5) +; @pt p9: tip(1: %l7) + +l7: nop + +; @pt p10: fup(1: %l8) +; @pt p11: tip.pgd(0: %l9) +l8: nop +l9: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 mode.tsx intx +;%0p4 fup 3: %0l1 +;%0p5 psbend +;%0p6 tnt.8 . +;%0p7 mode.tsx abrt +;%0p8 fup 1: %?l5.2 +;%0p9 tip 1: %?l7.2 +;%0p10 fup 1: %?l8.2 +;%0p11 tip.pgd 0: %?l9.0 + + +; @pt .exp(ptxed) +;? %0l1 # test [rax], rbx +;? %0l2 # je l5 +;[interrupt] +;[aborted] +;%0l7 # nop +;[disabled] diff --git a/test/src/bdm64-tnt-ind_call-xabort.ptt b/test/src/bdm64-tnt-ind_call-xabort.ptt new file mode 100644 index 0000000..fa6e6c2 --- /dev/null +++ b/test/src/bdm64-tnt-ind_call-xabort.ptt @@ -0,0 +1,107 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be +; Recorded Following a Transactional Abort. +; +; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) +; may result in a transactional abort. If an abort occurs immediately +; following a branch instruction, an incorrect branch target may be +; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace +; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the +; abort. +; +; cpu 6/61 +; cpu 6/71 +; cpu 6/79 +; cpu 6/86 +; +; Variant: conditional branch followed by an indirect call to tell us that +; we're on the wrong track. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: mode.tsx(begin) +; @pt p4: fup(3: %l1) +; @pt p5: psbend() + +l1: test [rax], rbx +l2: je l5 + +; @pt p6: tnt(n) +; +; The branch destination is bad. +; +; There is no way for us to know this as long as we can reach the bad +; branch destination without further Intel PT support. +; +; If we cannot reach it, however, we know that the branch was bad. +; +l3: nop +l4: call rax + +l5: hlt +l6: hlt + +; We immediately take an xabort from there +; +; @pt p7: mode.tsx(abort) +; @pt p8: fup(1: %l5) +; @pt p9: tip(1: %l7) + +l7: nop + +; @pt p10: fup(1: %l8) +; @pt p11: tip.pgd(0: %l9) +l8: nop +l9: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 mode.tsx intx +;%0p4 fup 3: %0l1 +;%0p5 psbend +;%0p6 tnt.8 . +;%0p7 mode.tsx abrt +;%0p8 fup 1: %?l5.2 +;%0p9 tip 1: %?l7.2 +;%0p10 fup 1: %?l8.2 +;%0p11 tip.pgd 0: %?l9.0 + + +; @pt .exp(ptxed) +;? %0l1 # test [rax], rbx +;? %0l2 # je l5 +;[interrupt] +;[aborted] +;%0l7 # nop +;[disabled] diff --git a/test/src/bdm70-psb_fup-tip_pge.ptt b/test/src/bdm70-psb_fup-tip_pge.ptt new file mode 100644 index 0000000..48c4bec --- /dev/null +++ b/test/src/bdm70-psb_fup-tip_pge.ptt @@ -0,0 +1,77 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; BDM70: Intel(R) Processor Trace PSB+ Packets May Contain Unexpected Packets. +; +; Some Intel Processor Trace packets should be issued only between +; TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE +; packet is generated it may be preceded by a PSB+ that incorrectly +; includes FUP and MODE.Exec packets. +; +; cpu 6/61 +; cpu 6/71 +; cpu 6/79 +; cpu 6/86 +; cpu 6/78 +; cpu 6/94 +; +; Variant: sync at the PSB directly preceding the TIP.PGE. +; +; Tracing is already enabled after the sync and the explicit +; enable event is suppressed as duplicate. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: mode.tsx(begin) +; @pt p5: psbend() +l1: nop +; @pt p6: tip.pge(3: %l1) +l2: nop +; @pt p7: fup(1: %l2) +; @pt p8: tip.pgd(0: %l3) +l3: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %0l1 +;%0p4 mode.tsx intx +;%0p5 psbend +;%0p6 tip.pge 3: %0l1 +;%0p7 fup 1: %?l2.2 +;%0p8 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[enabled] +;? %0l1 # nop +;[disabled] diff --git a/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt b/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt new file mode 100644 index 0000000..a9b1f2c --- /dev/null +++ b/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt @@ -0,0 +1,95 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; BDM70: Intel(R) Processor Trace PSB+ Packets May Contain Unexpected Packets. +; +; Some Intel Processor Trace packets should be issued only between +; TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE +; packet is generated it may be preceded by a PSB+ that incorrectly +; includes FUP and MODE.Exec packets. +; +; cpu 6/61 +; cpu 6/71 +; cpu 6/79 +; cpu 6/86 +; cpu 6/78 +; cpu 6/94 +; +; Variant: sync at an earlier PSB. +; +; Process status updates in the PSB+ directly preceding TIP.PGE +; with tracing still disabled. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: mode.tsx(begin) +; @pt p4: fup(3: %l1) +; @pt p5: psbend() +l1: jle l4 +; @pt p6: tip.pgd(1: %l2) +l2: nop + +; @pt p7: psb() +; @pt p8: mode.exec(64bit) +; @pt p9: fup(3: %l3) +; @pt p10: mode.tsx(begin) +; @pt p11: psbend() + +; @pt p12: tip.pge(3: %l3) +l3: nop +l4: nop +; @pt p13: fup(1: %l4) +; @pt p14: tip.pgd(0: %l5) +l5: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 mode.tsx intx +;%0p4 fup 3: %0l1 +;%0p5 psbend +;%0p6 tip.pgd 1: %?l2.2 +;%0p7 psb +;%0p8 mode.exec cs.l +;%0p9 fup 3: %0l3 +;%0p10 mode.tsx intx +;%0p11 psbend +;%0p12 tip.pge 3: %0l3 +;%0p13 fup 1: %?l4.2 +;%0p14 tip.pgd 0: %?l5.0 + + +; @pt .exp(ptxed) +;? %0l1 # jle l4 +;[disabled] +;[enabled] +;? %0l3 # nop +;[disabled] diff --git a/test/src/call_direct-ret_compressed-pic.ptt b/test/src/call_direct-ret_compressed-pic.ptt new file mode 100644 index 0000000..bdc91f8 --- /dev/null +++ b/test/src/call_direct-ret_compressed-pic.ptt @@ -0,0 +1,68 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a compressed return for a direct call +; +; Variant: ignore a call with zero displacement +; + +org 0x100000 +bits 32 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(32bit) +; @pt p4: psbend() +l1: call l4 +l2: jz l6 +l3: hlt + +; @pt p5: tnt(t.t) +l4: call l5 +l5: ret + +; @pt p6: fup(1: %l6) +; @pt p7: tip.pgd(0: %l7) +l6: nop +l7: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %?l1 +;%0p3 mode.exec cs.d +;%0p4 psbend +;%0p5 tnt.8 !! +;%0p6 fup 1: %?l6.2 +;%0p7 tip.pgd 0: %?l7.0 + + +; @pt .exp(ptxed) +;%0l1 # call l4 +;%0l4 # call . +;%0l5 # ret +;%0l2 # jz l6 +;[disabled] diff --git a/test/src/call_direct-ret_compressed.ptt b/test/src/call_direct-ret_compressed.ptt new file mode 100644 index 0000000..f893368 --- /dev/null +++ b/test/src/call_direct-ret_compressed.ptt @@ -0,0 +1,62 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a compressed return for a direct call +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call l4 +l2: jz l5 +l3: hlt +l4: ret +; @pt p5: tnt(t.t) +l5: nop +; @pt p6: fup(1: %l5) +; @pt p7: tip.pgd(0: 0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 !! +;%0p6 fup 1: %?l5.2 +;%0p7 tip.pgd 0: ???????????????? 
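+; The two taken bits in the tnt.8 above cover the compressed return from
+; l4 back to l2 and the jz to l5; hence the ptxed reconstruction below
+; shows the ret without a separate tip packet.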
+ + +; @pt .exp(ptxed) +;%0l1 # call l4 +;%0l4 # ret +;%0l2 # jz l5 +;[disabled] diff --git a/test/src/call_direct-ret_uncompressed.ptt b/test/src/call_direct-ret_uncompressed.ptt new file mode 100644 index 0000000..ca0f7ee --- /dev/null +++ b/test/src/call_direct-ret_uncompressed.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a non-compressed return for a direct call +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call l4 +l2: nop +l3: nop +l4: ret +; @pt p5: tip(1: %l2) +; @pt p6: fup(1: %l3) +; @pt p7: tip.pgd(0: 0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 1: %?l2.2 +;%0p6 fup 1: %?l3.2 +;%0p7 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # call l4 +;%0l4 # ret +;%0l2 # nop +;[disabled] diff --git a/test/src/call_indirect-ret_compressed.ptt b/test/src/call_indirect-ret_compressed.ptt new file mode 100644 index 0000000..8bb3d9c --- /dev/null +++ b/test/src/call_indirect-ret_compressed.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a compressed return for an indirect call +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call rbx +; @pt p5: tip(1: %l4) +l2: nop +l3: hlt +l4: ret +; @pt p6: tnt(t) +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: 0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 1: %?l4.2 +;%0p6 tnt.8 ! +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # call rbx +;%0l4 # ret +;%0l2 # nop +;[disabled] diff --git a/test/src/call_indirect-ret_uncompressed.ptt b/test/src/call_indirect-ret_uncompressed.ptt new file mode 100644 index 0000000..65293fe --- /dev/null +++ b/test/src/call_indirect-ret_uncompressed.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test a non-compressed return for an indirect call +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call rbx +; @pt p5: tip(1: %l4) +l2: nop +l3: nop +l4: ret +; @pt p6: tip(1: %l2) +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: 0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 1: %?l4.2 +;%0p6 tip 1: %?l2.2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # call rbx +;%0l4 # ret +;%0l2 # nop +;[disabled] diff --git a/test/src/call_indirect_deferred-ret_compressed.ptt b/test/src/call_indirect_deferred-ret_compressed.ptt new file mode 100644 index 0000000..7237a17 --- /dev/null +++ b/test/src/call_indirect_deferred-ret_compressed.ptt @@ -0,0 +1,65 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a compressed return for an indirect call with deferred tip +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call rbx +; tip deferred +l2: jz l5 +l3: hlt +l4: ret +; @pt p5: tnt(t.t) +; @pt p6: tip(1: %l4) +l5: nop +; @pt p7: fup(1: %l5) +; @pt p8: tip.pgd(0: 0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 !! +;%0p6 tip 1: %?l4.2 +;%0p7 fup 1: %?l5.2 +;%0p8 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # call rbx +;%0l4 # ret +;%0l2 # jz l5 +;[disabled] diff --git a/test/src/cbr-cyc.ptt b/test/src/cbr-cyc.ptt new file mode 100644 index 0000000..c33c124 --- /dev/null +++ b/test/src/cbr-cyc.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test CYC-based timing. +; +; Variant: CBR-based calibration +; +; opt:ptdump --time --time-delta --no-wall-clock +; opt:ptdump --nom-freq 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: cbr(0x2) +; @pt p3: psbend() + +; @pt p4: cyc(0x3) +; @pt p5: cyc(0x1) + +; @pt p6: cbr(0x1) +; @pt p7: cyc(0x2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 cbr 2 +;%0p3 psbend +;%0p4 cyc 3 tsc +6 +;%0p5 cyc 1 tsc +2 +;%0p6 cbr 1 +;%0p7 cyc 2 tsc +8 diff --git a/test/src/cbr-mtc-cyc-mtc.ptt b/test/src/cbr-mtc-cyc-mtc.ptt new file mode 100644 index 0000000..c637f7b --- /dev/null +++ b/test/src/cbr-mtc-cyc-mtc.ptt @@ -0,0 +1,54 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based timing. 
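+;
+; The expected tsc deltas in the ptdump output below follow from the
+; options further down: with nom-freq 4 and cbr 2, one cycle counts as
+; 4/2 = 2 tsc ticks, so cyc 3 and cyc 1 show up as +6 and +2.  With
+; cpuid-0x15.eax 1, cpuid-0x15.ebx 4, and mtc-freq 0, one mtc increment
+; corresponds to 4 ticks, so the mtc at p7 corrects the 8-tick cyc
+; estimate by -4.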
+; +; Variant: CBR-based calibration, time correction on MTC +; +; opt:ptdump --time --time-delta --no-wall-clock +; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: cbr(0x2) +; @pt p3: psbend() + +; @pt p4: mtc(0x2) +; @pt p5: cyc(0x3) +; @pt p6: cyc(0x1) +; @pt p7: mtc(0x3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 cbr 2 +;%0p3 psbend +;%0p4 mtc 2 tsc +0 +;%0p5 cyc 3 tsc +6 +;%0p6 cyc 1 tsc +2 +;%0p7 mtc 3 tsc -4 diff --git a/test/src/cbr-tsc-cyc-tma.ptt b/test/src/cbr-tsc-cyc-tma.ptt new file mode 100644 index 0000000..8ea86a1 --- /dev/null +++ b/test/src/cbr-tsc-cyc-tma.ptt @@ -0,0 +1,57 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test CYC-based TSC estimation. +; +; Variant: CBR-based calibration, +; CYC between TSC and TMA +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 1 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: cbr(0x2) +; @pt p3: psbend() + +; @pt p4: tsc(0xa0000) +; @pt p5: cyc(0x6) +; @pt p6: tma(0x102, 0x8) +; @pt p7: cyc(0x8) +; @pt p8: cyc(0x4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 cbr 2 +;%0p3 psbend +;%0p4 tsc a0000 tsc +a0000 +;%0p5 cyc 6 tsc +3 +;%0p6 tma 102, 8 tsc +0 +;%0p7 cyc 8 tsc +4 +;%0p8 cyc 4 tsc +2 diff --git a/test/src/cbr-tsc-tma-mtc-cyc.ptt b/test/src/cbr-tsc-tma-mtc-cyc.ptt new file mode 100644 index 0000000..0765664 --- /dev/null +++ b/test/src/cbr-tsc-tma-mtc-cyc.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: CBR-based calibration, CBR before TSC +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: cbr(0x2) +; @pt p4: tsc(0xa0000) +; @pt p5: tma(0x102, 0x8) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x3) +; @pt p8: cyc(0x1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 cbr 2 +;%0p4 tsc a0000 tsc +a0000 +;%0p5 tma 102, 8 tsc +0 +;%0p6 mtc 2 tsc +3f0 +;%0p7 cyc 3 tsc +6 +;%0p8 cyc 1 tsc +2 diff --git a/test/src/direct_call-tip_pgd_noip-syscall.ptt b/test/src/direct_call-tip_pgd_noip-syscall.ptt new file mode 100644 index 0000000..0a5a5fe --- /dev/null +++ b/test/src/direct_call-tip_pgd_noip-syscall.ptt @@ -0,0 +1,60 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD with suppressed IP payload is applied to the next far branch +; (syscall in this case). +; +; Variant: there's a direct call on our way to the syscall. 
+; test that the disable event is not applied too early. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: call l3 +l2: hlt + +l3: syscall +l4: ret +; @pt p5: tip.pgd(0: %l4) + + +; @pt .exp(ptxed) +;%0l1 # call l3 +;%0l3 # syscall +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l4.0 diff --git a/test/src/direct_jump-tip_pgd_noip-far_call.ptt b/test/src/direct_jump-tip_pgd_noip-far_call.ptt new file mode 100644 index 0000000..6cde70e --- /dev/null +++ b/test/src/direct_jump-tip_pgd_noip-far_call.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD with suppressed IP payload is applied to the next far branch +; (far call in this case). +; +; Variant: there's a direct jump on our way to the far call. +; test that the disable event is not applied too early. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jmp l3 +l2: hlt + +l3: call far [rax] ; l5 +l4: hlt +; @pt p5: tip.pgd(0: %l5) + +l5: hlt + +; @pt .exp(ptxed) +;%0l1 # jmp l3 +;%0l3 # call far [rax] ; l5 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l5.0 diff --git a/test/src/dump-all-packets.ptt b/test/src/dump-all-packets.ptt new file mode 100644 index 0000000..891442a --- /dev/null +++ b/test/src/dump-all-packets.ptt @@ -0,0 +1,123 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test dumping of all packets. +; +; opt:ptdump --no-sync + +org 0x42 +bits 64 + +; @pt p01: psbend() +; @pt p02: psb() +; @pt p03: pad() +; @pt p04: ovf() +; @pt p05: tnt(tnnnt) +; @pt p06: tnt64(tnntnntnnttnntnntnnttnntnntnnttnntnntnnttnnttnt) +; @pt p07: tip(3: 0xffffffffffff1000) +; @pt p08: tip(3: 0x1000) +; @pt p09: tip(2: 0x1001) +; @pt p10: tip(1: 0x1002) +; @pt p11: tip(0: 0x1003) +; @pt p12: tip.pge(3: 0xffffffffffff2000) +; @pt p13: tip.pge(3: 0x2000) +; @pt p14: tip.pge(2: 0x2001) +; @pt p15: tip.pge(1: 0x2002) +; @pt p16: tip.pge(0: 0x2003) +; @pt p17: tip.pgd(3: 0xffffffffffff3000) +; @pt p18: tip.pgd(3: 0x3000) +; @pt p19: tip.pgd(2: 0x3001) +; @pt p20: tip.pgd(1: 0x3002) +; @pt p21: tip.pgd(0: 0x3003) +; @pt p22: fup(3: 0xffffffffffff4000) +; @pt p23: fup(3: 0x4000) +; @pt p24: fup(2: 0x4001) +; @pt p25: fup(1: 0x4002) +; @pt p26: fup(0: 0x4003) +; @pt p27: mode.exec(16bit) +; @pt p28: mode.exec(32bit) +; @pt p29: mode.exec(64bit) +; @pt p30: mode.tsx(begin) +; @pt p31: mode.tsx(commit) +; @pt p32: mode.tsx(abort) +; @pt p33: pip(0xafafaf) +; @pt p34: pip(0xafafaf, nr) +; @pt p35: tsc(0x12345) +; @pt p36: cbr(0x24) +; @pt p37: tma(0x12, 0x34) +; @pt p38: mtc(0x94) +; @pt p39: cyc(0x57) +; @pt p40: stop() +; @pt p41: vmcs(0xabcd000) +; @pt p42: mnt(0x2a2242e5d4c3b2a1) +; @pt p43: tip(6: 0x00cccccccccc4000) +; @pt p44: tip(4: 0xffffffff4000) + +; @pt .exp(ptdump) +;%0p01 psbend +;%0p02 psb +;%0p03 pad +;%0p04 ovf +;%0p05 tnt.8 !...! +;%0p06 tnt.64 !..!..!..!!..!..!..!!..!..!..!!..!..!..!!..!!.! +;%0p07 tip 3: ffffffffffff1000 +;%0p08 tip 3: 0000000000001000 +;%0p09 tip 2: ????????00001001 +;%0p10 tip 1: ????????????1002 +;%0p11 tip 0: ???????????????? +;%0p12 tip.pge 3: ffffffffffff2000 +;%0p13 tip.pge 3: 0000000000002000 +;%0p14 tip.pge 2: ????????00002001 +;%0p15 tip.pge 1: ????????????2002 +;%0p16 tip.pge 0: ???????????????? +;%0p17 tip.pgd 3: ffffffffffff3000 +;%0p18 tip.pgd 3: 0000000000003000 +;%0p19 tip.pgd 2: ????????00003001 +;%0p20 tip.pgd 1: ????????????3002 +;%0p21 tip.pgd 0: ???????????????? +;%0p22 fup 3: ffffffffffff4000 +;%0p23 fup 3: 0000000000004000 +;%0p24 fup 2: ????????00004001 +;%0p25 fup 1: ????????????4002 +;%0p26 fup 0: ???????????????? 
+;%0p27 mode.exec +;%0p28 mode.exec cs.d +;%0p29 mode.exec cs.l +;%0p30 mode.tsx intx +;%0p31 mode.tsx +;%0p32 mode.tsx abrt +;%0p33 pip afafa0 cr3 0000000000afafa0 +;%0p34 pip afafa0, nr cr3 0000000000afafa0 +;%0p35 tsc 12345 +;%0p36 cbr 24 +;%0p37 tma 12, 34 +;%0p38 mtc 94 +;%0p39 cyc 57 +;%0p40 stop +;%0p41 vmcs abcd000 vmcs 000000000abcd000 +;%0p42 mnt 2a2242e5d4c3b2a1 +;%0p43 tip 6: 00cccccccccc4000 +;%0p44 tip 4: ????ffffffff4000 diff --git a/test/src/fup-pip-vmcs-tip.ptt b/test/src/fup-pip-vmcs-tip.ptt new file mode 100644 index 0000000..86efc25 --- /dev/null +++ b/test/src/fup-pip-vmcs-tip.ptt @@ -0,0 +1,69 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a PIP and VMCS binding to an in-flight asynchronous branch. +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop + +; @pt p4: fup(1: %l1) +l1: hlt + +; @pt p5: pip(0xcdcdc0) +; @pt p6: vmcs(0xcdcdc000) +; @pt p7: tip(3: %l2) +l2: nop + +; @pt p8:fup(1: %l3) +; @pt p9:tip.pgd(0: %l3) +l3: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l1.2 +;%0p5 pip cdcdc0 cr3 0000000000cdcdc0 +;%0p6 vmcs cdcdc000 vmcs 00000000cdcdc000 +;%0p7 tip 3: %?l2 +;%0p8 fup 1: %?l3.2 +;%0p9 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;%0l0 +;[interrupt] +;%0l2 +;[disabled] diff --git a/test/src/fup-tip-eos.ptt b/test/src/fup-tip-eos.ptt new file mode 100644 index 0000000..b78c916 --- /dev/null +++ b/test/src/fup-tip-eos.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we indicate the end of the trace without a TIP.PGD. +; +; Variant: the trace ends after an asynchronous branch +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: hlt + +; @pt p4:fup(1: %l1) +; @pt p5:tip(1: %l2) +l2: hlt + + +; @pt .exp(ptxed) +;%0l0 +;[interrupt] +;[end of trace] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 fup 1: %?l1.2 +;%0p5 tip 1: %?l2.2 diff --git a/test/src/fup-tip-fup-tip_pgd.ptt b/test/src/fup-tip-fup-tip_pgd.ptt new file mode 100644 index 0000000..57e8969 --- /dev/null +++ b/test/src/fup-tip-fup-tip_pgd.ptt @@ -0,0 +1,67 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that FUP + TIP.PGD disables tracing as part of the asynchronous +; branch. 
+;
+; Variant: Tracing is disabled after an interrupt before executing the first
+; instruction of the interrupt handler (e.g. due to another interrupt).
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+l2: hlt
+
+; @pt p5: fup(1: %l2)
+; @pt p6: tip(3: %l3)
+l3: hlt
+l4: hlt
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[interrupt]
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip 3: %?l3
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
diff --git a/test/src/fup-tip.ptt b/test/src/fup-tip.ptt
new file mode 100644
index 0000000..a72879d
--- /dev/null
+++ b/test/src/fup-tip.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2013-2017, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a combination of FUP + TIP representing an asynchronous branch such
+; as an interrupt with both ring-0 and ring-3 traced.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0:psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2:psbend()
+; @pt p3:tip.pge(3: %l0)
+l0: nop
+l1: nop
+
+; The fup contains the IP of the instruction after the last one that was
+; executed.
+; @pt p4:fup(3: %l2)
+l2: nop
+
+; @pt p5:tip(3: %l3)
+l3: nop
+
+; @pt p6:fup(3: %l4)
+; @pt p7:tip.pgd(0: 0)
+l4: nop
+
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l0
+;%0l1
+;[interrupt]
+;%0l3
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 tip.pge 3: %0l0
+;%0p4 fup 3: %0l2
+;%0p5 tip 3: %0l3
+;%0p6 fup 3: %0l4
+;%0p7 tip.pgd 0: ????????????????
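The FUP + TIP pattern exercised by fup-tip.ptt above is what the decoders report as an asynchronous branch: the FUP carries the IP of the instruction after the last one that completed, and the TIP carries the destination, e.g. the interrupt handler. As an illustration only (not part of this patch), a minimal C sketch that spots this pattern with libipt's packet decoder could look as follows; the function name and the raw trace buffer are assumptions of the example, and a complete decoder would also undo the IP compression indicated by the packets' ipc field instead of printing the raw payload.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include "intel-pt.h"

/* Sketch: walk a raw Intel PT buffer (e.g. the .pt file pttc generates for
 * the test above) and report FUP packets that are directly followed by a
 * TIP.  Other packets (such as PIP or VMCS) may bind to the same event in
 * other tests; this sketch only handles the adjacent case.
 */
static int scan_async_branches(uint8_t *begin, size_t size)
{
	struct pt_packet_decoder *decoder;
	struct pt_config config;
	struct pt_packet packet;
	uint64_t fup_payload = 0ull;
	int have_fup = 0, errcode;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.begin = begin;
	config.end = begin + size;

	decoder = pt_pkt_alloc_decoder(&config);
	if (!decoder)
		return -pte_nomem;

	errcode = pt_pkt_sync_forward(decoder);
	while (errcode >= 0) {
		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (errcode < 0)
			break;

		switch (packet.type) {
		case ppt_fup:
			/* Remember the FUP payload; it may be compressed
			 * (see packet.payload.ip.ipc).
			 */
			fup_payload = packet.payload.ip.ip;
			have_fup = 1;
			break;

		case ppt_tip:
			if (have_fup)
				printf("async branch near 0x%" PRIx64 "\n",
				       fup_payload);
			have_fup = 0;
			break;

		default:
			have_fup = 0;
			break;
		}
	}

	pt_pkt_free_decoder(decoder);
	return (errcode == -pte_eos) ? 0 : errcode;
}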
diff --git a/test/src/fup-tip_pgd-tip_pge.ptt b/test/src/fup-tip_pgd-tip_pge.ptt new file mode 100644 index 0000000..e6b88ee --- /dev/null +++ b/test/src/fup-tip_pgd-tip_pge.ptt @@ -0,0 +1,62 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a combination of enable and async disable on the same IP. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: psbend() +l1: nop +; @pt p5: fup(1: %l1) +; @pt p6: tip.pgd(0: %l1) +; @pt p7: tip.pge(3: %l1) +l2: nop +; @pt p8: fup(1: %l2) +; @pt p9: tip.pgd(0: %l3) +l3: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %0l1 +;%0p4 psbend +;%0p5 fup 1: %?l1.2 +;%0p6 tip.pgd 0: %?l1.0 +;%0p7 tip.pge 3: %0l1 +;%0p8 fup 1: %?l2.2 +;%0p9 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[resumed] +;%0l1 # nop +;[disabled] diff --git a/test/src/fup-tip_pgd-tip_pge_other_ip.ptt b/test/src/fup-tip_pgd-tip_pge_other_ip.ptt new file mode 100644 index 0000000..76b7cdf --- /dev/null +++ b/test/src/fup-tip_pgd-tip_pge_other_ip.ptt @@ -0,0 +1,66 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a combination of async disable and enable on a different IP. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: psbend() +l1: nop +l2: hlt +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l2) +l3: nop +; @pt p7: tip.pge(3: %l3) +l4: nop +; @pt p8: fup(1: %l4) +; @pt p9: tip.pgd(0: %l5) +l5: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %0l1 +;%0p4 psbend +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l2.0 +;%0p7 tip.pge 3: %0l3 +;%0p8 fup 1: %?l4.2 +;%0p9 tip.pgd 0: %?l5.0 + + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] +;[enabled] +;%0l3 # nop +;[disabled] diff --git a/test/src/fup-tip_pgd.ptt b/test/src/fup-tip_pgd.ptt new file mode 100644 index 0000000..e7288d6 --- /dev/null +++ b/test/src/fup-tip_pgd.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that FUP + TIP.PGD disables tracing as part of the asynchronous +; branch. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop +l2: nop +l3: nop +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(3: %l3) + + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 3: %?l3 diff --git a/test/src/fup-tip_pgd_noip.ptt b/test/src/fup-tip_pgd_noip.ptt new file mode 100644 index 0000000..8a4d119 --- /dev/null +++ b/test/src/fup-tip_pgd_noip.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that FUP + TIP.PGD disables tracing as part of the asynchronous +; branch (with suppressed TIP.PGD payload in this case). +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop +l2: nop +l3: nop +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l3) + + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l3.0 diff --git a/test/src/int-iret-cpl_0.ptt b/test/src/int-iret-cpl_0.ptt new file mode 100644 index 0000000..f6790c6 --- /dev/null +++ b/test/src/int-iret-cpl_0.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that an INT followed by an IRET are decoded correctly. +; +; Variant: cpl 3 filtered out +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() + +; @pt p4: tip.pge(3: %l5) + +l1: int 42 +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: iret +l7: hlt + +; @pt p5: tip.pgd(0: %l2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 tip.pge 3: %0l5 +;%0p5 tip.pgd 0: %?l2.0 + +; @pt .exp(ptxed) +;[enabled] +;%0l5 # nop +;%0l6 # iret +;[disabled] diff --git a/test/src/int-iret-cpl_3.ptt b/test/src/int-iret-cpl_3.ptt new file mode 100644 index 0000000..b954dcf --- /dev/null +++ b/test/src/int-iret-cpl_3.ptt @@ -0,0 +1,94 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that an INT followed by an IRET are decoded correctly. +; +; Variant: cpl 0 filtered out +; +; +; Software interrupts receive FUP(CLIP) + TIP(BLIP?) +; +; We can not reliably determine whether the FUP/TIP belongs to the +; software interrupt or to an asynchronous interrupt that was taken +; before the instruction. +; +; To distinguish the two cases we would need to read ahead but that +; may require decoding an unknown amount of code (e.g. 
in different +; processes for system-wide ring-3 tracing) until we return either +; to CLIP if it was an asynchronous interrupt or to NLIP (or even to +; a later IP) if it wasn't. +; +; Instead, we assume that it was an asynchronous interrupt. Tracing +; appears to get disabled before and re-enabled after the instruction. +; +; This is wrong most of the time. But it is predictably wrong and it +; avoids the case where we incorrectly assume a synchronous transfer +; and get out of sync. +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l0) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l0: nop + +; @pt p5: fup(1: %l1) +; @pt p6: tip.pgd(0: %l5) + +l1: int 42 +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: iret +l7: hlt + +; @pt p7: tip.pge(3: %l2) + +; @pt p8: fup(1: %l3) +; @pt p9: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l0 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l1.2 +;%0p6 tip.pgd 0: %?l5.0 +;%0p7 tip.pge 3: %?l2 +;%0p8 fup 1: %?l3.2 +;%0p9 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l0 # nop - missing: l1: int 42 +;[disabled] +;[enabled] +;%0l2 # nop +;[disabled] diff --git a/test/src/int-iret.ptt b/test/src/int-iret.ptt new file mode 100644 index 0000000..ac6326a --- /dev/null +++ b/test/src/int-iret.ptt @@ -0,0 +1,96 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that an INT followed by an IRET are decoded correctly. +; +; Variant: no cpl filtering +; +; +; Software interrupts receive FUP(CLIP) + TIP(BLIP?) +; +; We can not reliably determine whether the FUP/TIP belongs to the +; software interrupt or to an asynchronous interrupt that was taken +; before the instruction. +; +; To distinguish the two cases we would need to read ahead but that +; may require decoding an unknown amount of code (in the kernel or +; hypervisor or even in different processes) until we return either +; to CLIP if it was an asynchronous interrupt or to NLIP (or even to +; a later IP) if it wasn't. +; +; Instead, we assume that it was an asynchronous interrupt. 
Control +; appears to flow from before the software interrupt instruction to +; the interrupt handler and back after the instruction. +; +; This is wrong most of the time. But it is predictably wrong and it +; avoids the case where we incorrectly assume a synchronous transfer +; and get out of sync. +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l0) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l0: nop + +; @pt p5: fup(1: %l1) +; @pt p6: tip(3: %l5) + +l1: int 42 +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: iret +l7: hlt + +; @pt p7: tip(3: %l2) + +; @pt p8: fup(1: %l3) +; @pt p9: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l0 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l1.2 +;%0p6 tip 3: %0l5 +;%0p7 tip 3: %0l2 +;%0p8 fup 1: %?l3.2 +;%0p9 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l0 # nop +;[interrupt] # this should really be: l1: int 42 +;%0l5 # nop +;%0l6 # iret +;%0l2 # nop +;[disabled] diff --git a/test/src/linear-fup-tip_pgd.ptt b/test/src/linear-fup-tip_pgd.ptt new file mode 100644 index 0000000..acc65fb --- /dev/null +++ b/test/src/linear-fup-tip_pgd.ptt @@ -0,0 +1,59 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a rather long linear trace. To keep the test file small, we only check +; the number of instructions. +; +; opt:ptxed --quiet --stat --stat:insn +; +; Variant: linear trace ends with disabled event. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l0) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l0: times 100000 nop + +l1: hlt +; @pt p5: fup(2: %l1) +; @pt p6: tip.pgd(0: %l1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %?l0 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 2: %?l1.4 +;%0p6 tip.pgd 0: %?l1.0 + + +; @pt .exp(ptxed) +;insn: 100000. 
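The linear-fup-tip_pgd.ptt test above (and linear-tip.ptt, which follows) validates only the instruction count that ptxed prints for --stat:insn. For orientation, and again not part of this patch, a rough C sketch of deriving such a count with libipt's instruction-flow decoder is given below; the helper name, the caller-provided configuration and image, and the reduced error handling are assumptions of the example.

#include <stdint.h>
#include "intel-pt.h"

/* Sketch: decode instructions and count them, similar in spirit to the
 * insn statistic of ptxed.  The config is expected to describe the raw
 * trace buffer and the image to map the assembled test binary.
 */
static int count_insn(const struct pt_config *config, struct pt_image *image,
		      uint64_t *count)
{
	struct pt_insn_decoder *decoder;
	struct pt_insn insn;
	int status;

	decoder = pt_insn_alloc_decoder(config);
	if (!decoder)
		return -pte_nomem;

	status = pt_insn_set_image(decoder, image);
	if (status < 0)
		goto out;

	*count = 0ull;
	for (;;) {
		/* Synchronize onto the next PSB. */
		status = pt_insn_sync_forward(decoder);
		if (status < 0)
			break;

		for (;;) {
			status = pt_insn_next(decoder, &insn, sizeof(insn));
			if (status < 0)
				break;

			*count += 1ull;
		}

		/* Re-sync after end-of-stream in case the trace contains
		 * further PSB+ segments; give up on other errors.
		 */
		if (status != -pte_eos)
			break;
	}

out:
	pt_insn_free_decoder(decoder);
	return (status == -pte_eos) ? 0 : status;
}

Whether tracing ends in a disable or the buffer simply runs out, the decoder eventually reports -pte_eos, which the sketch treats as a normal end of trace.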
diff --git a/test/src/linear-tip.ptt b/test/src/linear-tip.ptt new file mode 100644 index 0000000..7c5f238 --- /dev/null +++ b/test/src/linear-tip.ptt @@ -0,0 +1,65 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a rather long linear trace. To keep the test file small, we only check +; the number of instructions. +; +; opt:ptxed --quiet --stat --stat:insn +; +; Variant: linear trace ends with an indirect branch. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l0) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l0: times 50000 nop + +; @pt p5: tip(2: %l0) +; @pt p6: tip(2: %l2) +l1: jmp rax + +; @pt p7: fup(1: %l2) +; @pt p8: tip.pgd(0: %l2) +l2: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %?l0 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 2: %?l0.4 +;%0p6 tip 2: %?l2.4 +;%0p7 fup 1: %?l2.2 +;%0p8 tip.pgd 0: %?l2.0 + + +; @pt .exp(ptxed) +;insn: 100002. diff --git a/test/src/loop-tnt-64.ptt b/test/src/loop-tnt-64.ptt new file mode 100644 index 0000000..cef1be6 --- /dev/null +++ b/test/src/loop-tnt-64.ptt @@ -0,0 +1,193 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a big for loop +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: mov rax, 0x0 +l2: jmp l4 +l3: add rax, 0x1 +l4: cmp rax, 0x2a +l5: jle l3 +; @pt p5: tnt64(tttttttttttttttttttttttttttttttttttttttttttn) +; @pt p6: fup(3: %l6) +; @pt p7: tip.pgd(0: 0) +l6: leave + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!. +;%0p6 fup 3: %0l6 +;%0p7 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # mov rax, 0x0 +;%0l2 # jmp l4 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 
0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x2a +;%0l5 # jle l3 +;[disabled] diff --git a/test/src/loop-tnt-tnt.ptt b/test/src/loop-tnt-tnt.ptt new file mode 100644 index 0000000..00d3dab --- /dev/null +++ b/test/src/loop-tnt-tnt.ptt @@ -0,0 +1,90 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a simple for loop using two TNT packets. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: mov rax, 0x0 +l2: jmp l4 +l3: add rax, 0x1 +l4: cmp rax, 0x7 +l5: jle l3 +; @pt p5: tnt(t.t.t.t.t.t) +; @pt p6: tnt(t.t.n) +; @pt p7: fup(3: %l6) +; @pt p8: tip.pgd(0: 0) +l6: leave + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 !!!!!! +;%0p6 tnt.8 !!. +;%0p7 fup 3: %0l6 +;%0p8 tip.pgd 0: ???????????????? 
+ + +; @pt .exp(ptxed) +;%0l1 # mov rax, 0x0 +;%0l2 # jmp l4 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x7 +;%0l5 # jle l3 +;[disabled] diff --git a/test/src/loop-tnt.ptt b/test/src/loop-tnt.ptt new file mode 100644 index 0000000..65446cb --- /dev/null +++ b/test/src/loop-tnt.ptt @@ -0,0 +1,70 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a simple for loop +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: mov rax, 0x0 +l2: jmp l4 +l3: add rax, 0x1 +l4: cmp rax, 0x1 +l5: jle l3 +; @pt p5: tnt(t.t.n) +; @pt p6: fup(3: %l6) +; @pt p7: tip.pgd(0: 0) +l6: leave + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 !!. +;%0p6 fup 3: %0l6 +;%0p7 tip.pgd 0: ???????????????? + + +; @pt .exp(ptxed) +;%0l1 # mov rax, 0x0 +;%0l2 # jmp l4 +;%0l4 # cmp rax, 0x1 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x1 +;%0l5 # jle l3 +;%0l3 # add rax, 0x1 +;%0l4 # cmp rax, 0x1 +;%0l5 # jle l3 +;[disabled] diff --git a/test/src/mode_exec-tip.ptt b/test/src/mode_exec-tip.ptt new file mode 100644 index 0000000..7f4db8f --- /dev/null +++ b/test/src/mode_exec-tip.ptt @@ -0,0 +1,66 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a MODE.EXEC binding to a TIP. +; +; The block decoder used to fail on this as it was not able to reach the +; MODE.EXEC event IP over the far branch that caused the TIP. +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(32bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: jmp far [rax] +l1: hlt + +; @pt p4: mode.exec(64bit) +; @pt p5: tip(3: %l2) +l2: nop + +; @pt p6:fup(1: %l3) +; @pt p7:tip.pgd(0: %l3) +l3: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.d +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 mode.exec cs.l +;%0p5 tip 3: %?l2 +;%0p6 fup 1: %?l3.2 +;%0p7 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;%0l0 +;%0l2 +;[disabled] diff --git a/test/src/mtc-cyc_calibrate.ptt b/test/src/mtc-cyc_calibrate.ptt new file mode 100644 index 0000000..8b6464e --- /dev/null +++ b/test/src/mtc-cyc_calibrate.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based timing. +; +; Variant: MTC-based calibration +; +; opt:ptdump --time --time-delta --no-wall-clock +; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: mtc(0x2) +; @pt p4: cyc(0x100) +; @pt p5: mtc(0x3) +; @pt p6: cyc(0x100) +; @pt p7: mtc(0x4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 mtc 2 tsc +0 +;[%p4: calibration error: no timing information] +;[%p4: error updating time: no calibration] +;%0p4 cyc 100 tsc +0 +;%0p5 mtc 3 tsc +40 +;%0p6 cyc 100 tsc +40 +;%0p7 mtc 4 tsc +0 diff --git a/test/src/mtc.ptt b/test/src/mtc.ptt new file mode 100644 index 0000000..f55aabe --- /dev/null +++ b/test/src/mtc.ptt @@ -0,0 +1,50 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based timing. +; +; Variant: No calibration needed. 
+; +; opt:ptdump --time --time-delta --no-tcal --no-wall-clock +; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: mtc(0xc1) +; @pt p4: mtc(0xc2) +; @pt p5: mtc(0xc4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 mtc c1 tsc +0 +;%0p4 mtc c2 tsc +400 +;%0p5 mtc c4 tsc +800 diff --git a/test/src/ovf-fup.ptt b/test/src/ovf-fup.ptt new file mode 100644 index 0000000..e609447 --- /dev/null +++ b/test/src/ovf-fup.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test packet generation overflow +; +; Variant: tracing remains enabled +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: hlt + +; @pt p4: ovf() +; @pt p5: fup(3: %l1) +l1: nop + +; @pt p6: fup(1: %l2) +; @pt p7: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 ovf +;%0p5 fup 3: %?l1 +;%0p6 fup 1: %?l2.2 +;%0p7 tip.pgd 0: %?l3.0 diff --git a/test/src/ovf-timing-fup.ptt b/test/src/ovf-timing-fup.ptt new file mode 100644 index 0000000..7e126cd --- /dev/null +++ b/test/src/ovf-timing-fup.ptt @@ -0,0 +1,76 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test packet generation overflow +; +; Variant: tracing remains enabled, timing packets in-between +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: hlt + +; @pt p4: ovf() +; @pt p5: tsc(0xa00) +; @pt p6: cbr(0xf) +; @pt p7: tma(0xa, 0xc) +; @pt p8: pad() +; @pt p9: mtc(0x10) +; @pt p10: cyc(0xa) +; @pt p11: fup(3: %l1) +l1: nop + +; @pt p12: fup(1: %l2) +; @pt p13: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 ovf +;%0p5 tsc a00 +;%0p6 cbr f +;%0p7 tma a, c +;%0p8 pad +;%0p9 mtc 10 +;%0p10 cyc a +;%0p11 fup 3: %?l1 +;%0p12 fup 1: %?l2.2 +;%0p13 tip.pgd 0: %?l3.0 diff --git a/test/src/ovf-timing-tip_pge.ptt b/test/src/ovf-timing-tip_pge.ptt new file mode 100644 index 0000000..45f86de --- /dev/null +++ b/test/src/ovf-timing-tip_pge.ptt @@ -0,0 +1,79 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test packet generation overflow +; +; Variant: tracing disabled, timing packets in-between +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(32bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: hlt + +; @pt p4: ovf() +; @pt p5: tsc(0xa00) +; @pt p6: cbr(0xf) +; @pt p7: tma(0xa, 0xc) +; @pt p8: pad() +; @pt p9: mtc(0x10) +; @pt p10: cyc(0xa) +; @pt p11: mode.exec(64bit) +; @pt p12: tip.pge(3: %l1) +l1: nop + +; @pt p13: fup(1: %l2) +; @pt p14: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;[enabled] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.d +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 ovf +;%0p5 tsc a00 +;%0p6 cbr f +;%0p7 tma a, c +;%0p8 pad +;%0p9 mtc 10 +;%0p10 cyc a +;%0p11 mode.exec cs.l +;%0p12 tip.pge 3: %?l1 +;%0p13 fup 1: %?l2.2 +;%0p14 tip.pgd 0: %?l3.0 diff --git a/test/src/ovf-tip_pge.ptt b/test/src/ovf-tip_pge.ptt new file mode 100644 index 0000000..c468020 --- /dev/null +++ b/test/src/ovf-tip_pge.ptt @@ -0,0 +1,67 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test packet generation overflow +; +; Variant: tracing disabled +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(32bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: hlt + +; @pt p4: ovf() +; @pt p5: mode.exec(64bit) +; @pt p6: tip.pge(3: %l1) +l1: nop + +; @pt p7: fup(1: %l2) +; @pt p8: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;[enabled] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.d +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 ovf +;%0p5 mode.exec cs.l +;%0p6 tip.pge 3: %?l1 +;%0p7 fup 1: %?l2.2 +;%0p8 tip.pgd 0: %?l3.0 diff --git a/test/src/pip-far_call.ptt b/test/src/pip-far_call.ptt new file mode 100644 index 0000000..64fc54b --- /dev/null +++ b/test/src/pip-far_call.ptt @@ -0,0 +1,67 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that PIP binds to a far branch +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: psbend() +l1: nop + +; @pt p5: pip(0xcdcdc0) +; @pt p6: tip(3: %l4) +l2: call far [rax] ; l4 +l3: hlt + +l4: nop + +; @pt p7: fup(1: %l5) +; @pt p8: tip.pgd(0: %l6) +l5: nop +l6: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %?l1 +;%0p4 psbend +;%0p5 pip cdcdc0 cr3 0000000000cdcdc0 +;%0p6 tip 3: %?l4 +;%0p7 fup 1: %?l5.2 +;%0p8 tip.pgd 0: %?l6.0 + + +; @pt .exp(ptxed) +;%0l1 # nop +;%0l2 # call far [rax] # l4 +;%0l4 # nop +;[disabled] diff --git a/test/src/pip-pip_mov_cr3-fail.ptt b/test/src/pip-pip_mov_cr3-fail.ptt new file mode 100644 index 0000000..7129a95 --- /dev/null +++ b/test/src/pip-pip_mov_cr3-fail.ptt @@ -0,0 +1,60 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that a paging event is bound to the next MOV CR3 instruction. +; +; Variant: Only one event binds to each instruction. While searching +; for the next MOV CR3 to bind the second event, we run out +; of code. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: pip(0xa000) +; @pt p6: pip(0xb000) +l1: mov cr3, rax +l2: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 pip a000 cr3 000000000000a000 +;%0p6 pip b000 cr3 000000000000b000 + + +; @pt .exp(ptxed) +;%0l1 # mov cr3, rax +;%0l2 # hlt +;[%eos, 100004: error: no memory mapped at this address] diff --git a/test/src/pip_mov_cr3-pip_mov_cr3.ptt b/test/src/pip_mov_cr3-pip_mov_cr3.ptt new file mode 100644 index 0000000..2c940f3 --- /dev/null +++ b/test/src/pip_mov_cr3-pip_mov_cr3.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that a paging event is bound to the next MOV CR3 instruction. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: pip(0xa000) +l1: mov cr3, rax + +; @pt p6: pip(0xb000) +l2: mov cr3, rax + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) +l3: nop +l4: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 pip a000 cr3 000000000000a000 +;%0p6 pip b000 cr3 000000000000b000 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;%0l1 # mov cr3, rax +;%0l2 # mov cr3, rax +;[disabled] diff --git a/test/src/psb-empty.ptt b/test/src/psb-empty.ptt new file mode 100644 index 0000000..2843177 --- /dev/null +++ b/test/src/psb-empty.ptt @@ -0,0 +1,45 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we do not diagnose an error for an empty trace. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; yasm does not like empty files + nop + + +; @pt .exp(ptxed) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend diff --git a/test/src/psb-fup-tip_pgd.ptt b/test/src/psb-fup-tip_pgd.ptt new file mode 100644 index 0000000..a4f9fa4 --- /dev/null +++ b/test/src/psb-fup-tip_pgd.ptt @@ -0,0 +1,53 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that FUP + TIP.PGD disables tracing as part of the asynchronous +; branch. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop +l2: nop +; @pt p5: fup(1: %l1) +; @pt p6: tip.pgd(3: %l2) + + +; @pt .exp(ptxed) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l1.2 +;%0p6 tip.pgd 3: %?l2 diff --git a/test/src/psb-ovf-fup.ptt b/test/src/psb-ovf-fup.ptt new file mode 100644 index 0000000..b59424b --- /dev/null +++ b/test/src/psb-ovf-fup.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test packet generation overflow +; +; Variant: tracing remains enabled, overflow during PSB+ +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: ovf() +; @pt p4: fup(3: %l1) +l0: hlt +l1: nop + +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 ovf +;%0p4 fup 3: %?l1 +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l3.0 diff --git a/test/src/psb-ovf-tip_pge.ptt b/test/src/psb-ovf-tip_pge.ptt new file mode 100644 index 0000000..6453507 --- /dev/null +++ b/test/src/psb-ovf-tip_pge.ptt @@ -0,0 +1,65 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test packet generation overflow +; +; Variant: tracing disabled, overflow during PSB+ +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(32bit) +; @pt p2: fup(3: %l0) +; @pt p3: ovf() +l0: hlt + +; @pt p4: mode.exec(64bit) +; @pt p5: tip.pge(3: %l1) +l1: nop + +; @pt p6: fup(1: %l2) +; @pt p7: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptxed) +;[overflow] +;[enabled] +;%0l1 +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.d +;%0p2 fup 3: %?l0 +;%0p3 ovf +;%0p4 mode.exec cs.l +;%0p5 tip.pge 3: %?l1 +;%0p6 fup 1: %?l2.2 +;%0p7 tip.pgd 0: %?l3.0 diff --git a/test/src/psb-pip-psb.ptt b/test/src/psb-pip-psb.ptt new file mode 100644 index 0000000..2541024 --- /dev/null +++ b/test/src/psb-pip-psb.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that a PIP is processed while tracing is disabled. +; +; Variant: not enabled between two syncpoints. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: pip(0xa00) + +; @pt p4: psb() +; @pt p5: psbend() + +; yasm does not like empty files + nop + + +; @pt .exp(ptxed) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 pip a00 cr3 0000000000000a00 +;%0p4 psb +;%0p5 psbend diff --git a/test/src/psb-pip-tip_pge.ptt b/test/src/psb-pip-tip_pge.ptt new file mode 100644 index 0000000..350ca59 --- /dev/null +++ b/test/src/psb-pip-tip_pge.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that a PIP is processed while tracing is disabled. +; +; Variant: not enabled at syncpoint. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() + +; @pt p4: pip(0xa00) +; @pt p5: tip.pge(3: %l1) +l1: nop + +l2: nop +l3: hlt +; @pt p6: fup(1: %l2) +; @pt p7: tip.pgd(0: %l3) + + +; @pt .exp(ptxed) +;[enabled] +;%0l1 # nop +;[disabled] + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 pip a00 cr3 0000000000000a00 +;%0p5 tip.pge 3: %0l1 +;%0p6 fup 1: %?l2.2 +;%0p7 tip.pgd 0: %?l3.0 diff --git a/test/src/psb-psb.ptt b/test/src/psb-psb.ptt new file mode 100644 index 0000000..0fd476f --- /dev/null +++ b/test/src/psb-psb.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test that linear code between two PSB+ is printed correctly +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop + +; @pt p5: psb() +; @pt p6: fup(3: %l2) +; @pt p7: mode.exec(64bit) +; @pt p8: psbend() + +; @pt p9: fup(3: %l2) +; @pt p10: tip.pgd(0: 0) +l2: nop + + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 psb +;%0p6 fup 3: %0l2 +;%0p7 mode.exec cs.l +;%0p8 psbend +;%0p9 fup 3: %0l2 +;%0p10 tip.pgd 0: ???????????????? diff --git a/test/src/psb-stop.ptt b/test/src/psb-stop.ptt new file mode 100644 index 0000000..67d7a80 --- /dev/null +++ b/test/src/psb-stop.ptt @@ -0,0 +1,46 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD. +; +; Variant: we just sync'ed. +; + +org 0x100000 + +; @pt p1: psb() +; @pt p2: psbend() +; @pt p3: stop() + +; yasm does not like empty files +nop + +; @pt .exp(ptxed) + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 stop diff --git a/test/src/psb-tip_pgd-stop.ptt b/test/src/psb-tip_pgd-stop.ptt new file mode 100644 index 0000000..fe82b20 --- /dev/null +++ b/test/src/psb-tip_pgd-stop.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD. +; +; Variant: we sync'ed right at the TIP.PGD. +; + +org 0x100000 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: psbend() +l1: nop + +; @pt p4: fup(1: %l1) +; @pt p5: tip.pgd(0: %l2) +; @pt p6: stop() +l2: hlt + +; @pt .exp(ptxed) + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 psbend +;%0p4 fup 1: %?l1.2 +;%0p5 tip.pgd 0: %?l2.0 +;%0p6 stop diff --git a/test/src/psb-tnt-psb.ptt b/test/src/psb-tnt-psb.ptt new file mode 100644 index 0000000..0e31993 --- /dev/null +++ b/test/src/psb-tnt-psb.ptt @@ -0,0 +1,69 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we keep the tnt cache intact when reading ahead over a PSB+. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jne l3 +; @pt p5: tnt(t) +l2: hlt + +; @pt p6: psb() +; @pt p7: mode.exec(64bit) +; @pt p8: fup(3: %l3) +; @pt p9: psbend() +l3: nop +l4: nop +; @pt p10: fup(1: %l4) +; @pt p11: tip.pgd(0: %l5) +l5: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 ! 
+;%0p6 psb +;%0p7 mode.exec cs.l +;%0p8 fup 3: %0l3 +;%0p9 psbend +;%0p10 fup 1: %?l4.2 +;%0p11 tip.pgd 0: %?l5.0 + + +; @pt .exp(ptxed) +;%0l1 # jne l3 +;%0l3 # nop +;[disabled] diff --git a/test/src/psb-tsx.ptt b/test/src/psb-tsx.ptt new file mode 100644 index 0000000..29813e0 --- /dev/null +++ b/test/src/psb-tsx.ptt @@ -0,0 +1,57 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that MODE.TSX in PSB+ is used to initialize the ptxed state. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: mode.tsx(begin) +; @pt p5: psbend() +l1: nop + +; @pt p6: fup(3: %l2) +; @pt p7: tip.pgd(0: 0) +l2: nop + + +; @pt .exp(ptxed) +;? %0l1 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 mode.tsx intx +;%0p5 psbend +;%0p6 fup 3: %0l2 +;%0p7 tip.pgd 0: ???????????????? diff --git a/test/src/psb-vmcs.ptt b/test/src/psb-vmcs.ptt new file mode 100644 index 0000000..98bf811 --- /dev/null +++ b/test/src/psb-vmcs.ptt @@ -0,0 +1,46 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we print VMCS correctly +; + +org 0x100000 + +; @pt p1: psb() +; @pt p2: vmcs(0xcdcdf000) +; @pt p3: psbend() + +; yasm does not like empty files + nop + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 vmcs cdcdf000 vmcs 00000000cdcdf000 +;%0p3 psbend + + +; @pt .exp(ptxed) diff --git a/test/src/psb_nofup-psb.ptt b/test/src/psb_nofup-psb.ptt new file mode 100644 index 0000000..3c634a8 --- /dev/null +++ b/test/src/psb_nofup-psb.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that linear code between two PSB+ is printed correctly +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + nop + +; @pt p3: psb() +; @pt p4: fup(3: %l2) +; @pt p5: mode.exec(64bit) +; @pt p6: psbend() +l2: nop + +; @pt p7: fup(3: %l3) +; @pt p8: tip.pgd(0: 0) +l3: nop + + +; @pt .exp(ptxed) +;%0l2 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 psb +;%0p4 fup 3: %0l2 +;%0p5 mode.exec cs.l +;%0p6 psbend +;%0p7 fup 3: %0l3 +;%0p8 tip.pgd 0: ???????????????? 
diff --git a/test/src/ptdump-exec-mode.ptt b/test/src/ptdump-exec-mode.ptt new file mode 100644 index 0000000..e3096d7 --- /dev/null +++ b/test/src/ptdump-exec-mode.ptt @@ -0,0 +1,46 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptdump prints the execution mode correctly. +; +; opt:ptdump --exec-mode + +org 0x1000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: mode.exec(32bit) +; @pt p4: mode.exec(16bit) +; @pt p5: psbend() + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l em 64-bit +;%0p3 mode.exec cs.d em 32-bit +;%0p4 mode.exec em 16-bit +;%0p5 psbend diff --git a/test/src/ptdump-last-ip.ptt b/test/src/ptdump-last-ip.ptt new file mode 100644 index 0000000..c26be6b --- /dev/null +++ b/test/src/ptdump-last-ip.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptdump prints last-ip correctly. +; +; opt:ptdump --lastip + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: psbend() + +; @pt p2: fup(6: 0x0a00ccccddddeeee) +; @pt p3: tip(4: 0xffffeeeeffff) +; @pt p4: tip.pge(1: 0xdddd) +; @pt p5: fup(3: 0xffffddddeeee) +; @pt p6: tip.pgd(2: 0xeeeeffff) + + +; yasm does not like empty files + nop + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 psbend +;%0p2 fup 6: 0a00ccccddddeeee ip 0a00ccccddddeeee +;%0p3 tip 4: ????ffffeeeeffff ip 0a00ffffeeeeffff +;%0p4 tip.pge 1: ????????????dddd ip 0a00ffffeeeedddd +;%0p5 fup 3: ffffffffddddeeee ip ffffffffddddeeee +;%0p6 tip.pgd 2: ????????eeeeffff ip ffffffffeeeeffff diff --git a/test/src/ptdump-no-offset-raw.ptt b/test/src/ptdump-no-offset-raw.ptt new file mode 100644 index 0000000..f6cd67f --- /dev/null +++ b/test/src/ptdump-no-offset-raw.ptt @@ -0,0 +1,45 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptdump honors --no-offset +; +; Variant: the raw packet bytes are printed in the first column. 
+; +; opt:ptdump --no-offset --raw + +org 0x1000 +bits 64 + +; @pt psb() +; @pt psbend() + +; yasm does not like empty files + nop + + +; @pt .exp(ptdump) +;02820282028202820282028202820282 psb +;0223 psbend diff --git a/test/src/ptdump-no-offset.ptt b/test/src/ptdump-no-offset.ptt new file mode 100644 index 0000000..d80cf75 --- /dev/null +++ b/test/src/ptdump-no-offset.ptt @@ -0,0 +1,45 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptdump honors --no-offset +; +; Variant: the packet is printed in the first column. +; +; opt:ptdump --no-offset + +org 0x1000 +bits 64 + +; @pt psb() +; @pt psbend() + +; yasm does not like empty files + nop + + +; @pt .exp(ptdump) +;psb +;psbend diff --git a/test/src/ptxed-block-stat.ptt b/test/src/ptxed-block-stat.ptt new file mode 100644 index 0000000..3be3646 --- /dev/null +++ b/test/src/ptxed-block-stat.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptxed counts instructions and blocks correctly. +; +; opt:ptxed --block-decoder --stat + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: nop +l2: nop +l3: nop + +; @pt p4: fup(1: %l4) +; @pt p5: tip.pgd(0: %l4) +l4: hlt + + +; @pt .exp(ptxed) +;%0l0 +;%0l1 +;%0l2 +;%0l3 +;[disabled] +;insn: 4. +;blocks: 1. + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l4.2 +;%0p5 tip.pgd 0: %?l4.0 diff --git a/test/src/ptxed-block-stat_blocks.ptt b/test/src/ptxed-block-stat_blocks.ptt new file mode 100644 index 0000000..005f49d --- /dev/null +++ b/test/src/ptxed-block-stat_blocks.ptt @@ -0,0 +1,62 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptxed counts instructions and blocks correctly. +; +; opt:ptxed --block-decoder --stat --stat:blocks + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: nop +l2: nop +l3: nop + +; @pt p4: fup(1: %l4) +; @pt p5: tip.pgd(0: %l4) +l4: hlt + + +; @pt .exp(ptxed) +;%0l0 +;%0l1 +;%0l2 +;%0l3 +;[disabled] +;blocks: 1. 
+ +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l4.2 +;%0p5 tip.pgd 0: %?l4.0 diff --git a/test/src/ptxed-end_on_call-fup-tip_pgd.ptt b/test/src/ptxed-end_on_call-fup-tip_pgd.ptt new file mode 100644 index 0000000..12236ba --- /dev/null +++ b/test/src/ptxed-end_on_call-fup-tip_pgd.ptt @@ -0,0 +1,66 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test the end-on-call block decoder option. +; +; Variant: there's an async disable event after the call. +; +; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: call l3 +l2: hlt + +l3: nop + +; @pt p4: fup(1: %l4) +; @pt p5: tip.pgd(0: %l4) +l4: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l4.2 +;%0p5 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;[block] +;%0l0 # nop +;%0l1 # call l3 +;[block] +;%0l3 # nop +;[disabled] diff --git a/test/src/ptxed-end_on_call-ret_tip.ptt b/test/src/ptxed-end_on_call-ret_tip.ptt new file mode 100644 index 0000000..7edd62a --- /dev/null +++ b/test/src/ptxed-end_on_call-ret_tip.ptt @@ -0,0 +1,82 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test the end-on-call block decoder option. +; +; Variant: there's an uncompressed return after the call +; +; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: call l4 +l2: nop +l3: hlt + +; we first return to l0 to take another round using the block cache. +; +; @pt p4: tip(1: %l0) +; @pt p5: tip(1: %l2) +l4: nop +l5: ret + +; @pt p6: fup(1: %l3) +; @pt p7: tip.pgd(0: %l3) + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 tip 1: %?l0.2 +;%0p5 tip 1: %?l2.2 +;%0p6 fup 1: %?l3.2 +;%0p7 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[block] +;%0l0 # nop +;%0l1 # call l4 +;[block] +;%0l4 # nop +;%0l5 # ret +;[block] +;%0l0 # nop +;%0l1 # call l4 +;[block] +;%0l4 # nop +;%0l5 # ret +;[block] +;%0l2 # nop +;[disabled] diff --git a/test/src/ptxed-end_on_call-ret_tnt.ptt b/test/src/ptxed-end_on_call-ret_tnt.ptt new file mode 100644 index 0000000..e8bd278 --- /dev/null +++ b/test/src/ptxed-end_on_call-ret_tnt.ptt @@ -0,0 +1,72 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test the end-on-call block decoder option. 
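The ptxed expectation above shows what --block:end-on-call changes: blocks are additionally terminated at call instructions. A rough Python sketch of that grouping, not the libipt block decoder, with the needs_trace flags below being an assumption about which instructions end a block anyway:

# Illustration only: group an already reconstructed instruction stream into
# blocks, optionally ending a block at every call as end-on-call does.
def split_blocks(insns, end_on_call):
    blocks, cur = [], []
    for addr, mnemonic, needs_trace in insns:
        cur.append((addr, mnemonic))
        if needs_trace or (end_on_call and mnemonic.startswith("call")):
            blocks.append(cur)
            cur = []
    if cur:
        blocks.append(cur)
    return blocks

# First round of ptxed-end_on_call-ret_tip: the uncompressed ret needs a TIP,
# the direct call does not, so only end_on_call=True splits the call off.
stream = [("l0", "nop", False), ("l1", "call l4", False),
          ("l4", "nop", False), ("l5", "ret", True)]
print(split_blocks(stream, end_on_call=True))   # two blocks, as shown above
print(split_blocks(stream, end_on_call=False))  # a single block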
+; +; Variant: there's a compressed return after the call +; +; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: call l4 +l2: nop +l3: hlt + +; @pt p4: tnt(t) +l4: nop +l5: ret + +; @pt p5: fup(1: %l3) +; @pt p6: tip.pgd(0: %l3) + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 tnt.8 ! +;%0p5 fup 1: %?l3.2 +;%0p6 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[block] +;%0l0 # nop +;%0l1 # call l4 +;[block] +;%0l4 # nop +;%0l5 # ret +;[block] +;%0l2 # nop +;[disabled] diff --git a/test/src/ptxed-end_on_call-tip_pgd.ptt b/test/src/ptxed-end_on_call-tip_pgd.ptt new file mode 100644 index 0000000..a0e6ef9 --- /dev/null +++ b/test/src/ptxed-end_on_call-tip_pgd.ptt @@ -0,0 +1,65 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test the end-on-call block decoder option. +; +; Variant: there's a disable event after the call. +; +; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: call l3 +l2: hlt + +l3: nop +l4: ret + +; @pt p4: tip.pgd(0: %l2) + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 tip.pgd 0: %?l2.0 + + +; @pt .exp(ptxed) +;[block] +;%0l0 # nop +;%0l1 # call l3 +;[block] +;%0l3 # nop +;%0l4 # ret +;[disabled] diff --git a/test/src/ptxed-insn-stat.ptt b/test/src/ptxed-insn-stat.ptt new file mode 100644 index 0000000..0014460 --- /dev/null +++ b/test/src/ptxed-insn-stat.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptxed counts instructions correctly. +; +; opt:ptxed --insn-decoder --stat + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: nop +l2: nop +l3: nop + +; @pt p4:fup(3: %l4) +; @pt p5:tip.pgd(0: %l5) +l4: nop +l5: hlt + + +; @pt .exp(ptxed) +;%0l0 +;%0l1 +;%0l2 +;%0l3 +;[disabled] +;insn: 4. + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 fup 3: %0l4 +;%0p5 tip.pgd 0: %?l5.0 diff --git a/test/src/ptxed-stat_insn.ptt b/test/src/ptxed-stat_insn.ptt new file mode 100644 index 0000000..e7e3806 --- /dev/null +++ b/test/src/ptxed-stat_insn.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that ptxed counts instructions correctly. 
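For the --stat expectations above, a short sketch of what gets counted may help: ptxed tallies every instruction it reconstructs until the disable event, which is why the four nops yield "insn: 4.". The event list below is hypothetical and not the ptxed internals:

# Count reconstructed instructions up to the disable event (sketch only).
def count_insns(events):
    insns = 0
    for kind in events:
        if kind == "insn":
            insns += 1
        elif kind == "disabled":
            break
    return insns

# l0..l3 are decoded, then tracing is disabled before l4 executes.
print("insn: %d." % count_insns(["insn"] * 4 + ["disabled"]))  # -> insn: 4.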
+; +; opt:ptxed --stat --stat:insn + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop +l1: nop +l2: nop +l3: nop + +; @pt p4:fup(3: %l4) +; @pt p5:tip.pgd(0: %l5) +l4: nop +l5: hlt + + +; @pt .exp(ptxed) +;%0l0 +;%0l1 +;%0l2 +;%0l3 +;[disabled] +;insn: 4. + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 fup 3: %0l4 +;%0p5 tip.pgd 0: %?l5.0 diff --git a/test/src/ret_near_far.ptt b/test/src/ret_near_far.ptt new file mode 100644 index 0000000..ec54eb0 --- /dev/null +++ b/test/src/ret_near_far.ptt @@ -0,0 +1,361 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that far returns are not considered for ret compression +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: call l5 +l2: nop +l3: nop +l4: hlt + +l5: test eax, 0 +l6: jz l10 +l7: call l5 +l8: ret +l9: hlt + +l10: call far [rax] ; l13 +l11: jmp l8 +l12: hlt + +l13: retf +l14: hlt + +; Let's assume the call in l7 is executed 63 times. This doesn't make sense +; from looking at the code above, but that's not the point, here. +; +; All calls are direct, so far, but we have a conditional jump in l6, which +; is executed 64 times. On the 64th execution, it is taken and brings us to +; the far call in l10. +; +; @pt p5: tnt64(nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn) +; @pt p6: tnt64(nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnt) + +; Together with the call in l1 we now have a full return compression stack. +; +; @pt p7: tip(3: %l13) + +; The far return is not compressed. +; +; @pt p8: tip(3: %l11) + +; The following 64 returns are. +; +; @pt p9: tnt64(tttttttttttttttttttttttttttttttt) +; @pt p10: tnt64(tttttttttttttttttttttttttttttttt) + +; Disable tracing to complete the test. +; +; @pt p11: fup(3: %l3) +; @pt p12: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.64 ................................ 
+;%0p6 tnt.64 ...............................! +;%0p7 tip 3: %0l13 +;%0p8 tip 3: %0l11 +;%0p9 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +;%0p10 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +;%0p11 fup 3: %0l3 +;%0p12 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;%0l1 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l7 # call l5 +;%0l5 # test eax +;%0l6 # jz l10 +;%0l10 # call far [rax] # l13 +;%0l13 
# retf +;%0l11 # jmp l8 +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l8 # ret +;%0l2 # nop +;[disabled] diff --git a/test/src/skd007.ptt b/test/src/skd007.ptt new file mode 100644 index 0000000..6d87fc0 --- /dev/null +++ b/test/src/skd007.ptt @@ -0,0 +1,79 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets. +; +; Under complex micro-architectural conditions, an Intel PT (Processor +; Trace) OVF (Overflow) packet may be issued after the first byte of a +; multi-byte CYC (Cycle Count) packet, instead of any remaining bytes +; of the CYC. +; +; cpu 6/78 +; cpu 6/94 +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +l0: nop + +; The first CYC has its 2nd byte overwritten by OVF, which appears as +; another CYC packet. 
The two CYCs will have payloads of: +; +; 0x3* or 0x2* and +; 0x1e +; +; @pt p3: cyc(0x3e) +; @pt p4: cyc(0x1e) +; @pt p5: pad() +; @pt p6: fup(3: %l1) +l1: nop + +; @pt p7: fup(1: %l2) +; @pt p8: tip.pgd(0: %l3) +l2: nop +l3: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 cyc 3e +;%0p4 cyc 1e +;%0p5 pad +;%0p6 fup 3: %?l1 +;%0p7 fup 1: %?l2.2 +;%0p8 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[overflow] +;%0l1 # nop +;[disabled] diff --git a/test/src/skd010-mode_tsx-fup.ptt b/test/src/skd010-mode_tsx-fup.ptt new file mode 100644 index 0000000..59b031f --- /dev/null +++ b/test/src/skd010-mode_tsx-fup.ptt @@ -0,0 +1,73 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD010: Intel(R) PT FUP May be Dropped After OVF. +; +; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not +; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP +; Packet, Packet Generation Enable). +; +; cpu 6/78 +; cpu 6/94 +; +; Variant: Missing FUP, sync at MODE.TSX + FUP. +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: psbend() + +; @pt p3: ovf() +; fup missing + +; @pt p4: mode.tsx(begin) +; @pt p5: fup(3: %l0) +l0: nop + +; @pt p6: fup(1: %l1) +; @pt p7: tip.pgd(0: %l2) +l1: nop +l2: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 psbend +;%0p3 ovf +;%0p4 mode.tsx intx +;%0p5 fup 3: %?l0 +;%0p6 fup 1: %?l1.2 +;%0p7 tip.pgd 0: %?l2.0 + + +; @pt .exp(ptxed) +;[overflow] +;? %0l0 +;[disabled] diff --git a/test/src/skd010-psb.ptt b/test/src/skd010-psb.ptt new file mode 100644 index 0000000..51775ff --- /dev/null +++ b/test/src/skd010-psb.ptt @@ -0,0 +1,77 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD010: Intel(R) PT FUP May be Dropped After OVF. +; +; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not +; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP +; Packet, Packet Generation Enable). +; +; cpu 6/78 +; cpu 6/94 +; +; Variant: Missing FUP, sync at PSB+. +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: psbend() + +; @pt p3: ovf() +; fup missing + +; @pt p4: psb() +; @pt p5: mode.exec(64bit) +; @pt p6: fup(3: %l0) +; @pt p7: psbend() +l0: nop + +; @pt p8: fup(1: %l1) +; @pt p9: tip.pgd(0: %l2) +l1: nop +l2: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 psbend +;%0p3 ovf +;%0p4 psb +;%0p5 mode.exec cs.l +;%0p6 fup 3: %?l0 +;%0p7 psbend +;%0p8 fup 1: %?l1.2 +;%0p9 tip.pgd 0: %?l2.0 + + +; @pt .exp(ptxed) +;[overflow] +;%0l0 # nop +;[disabled] diff --git a/test/src/skd010-tip.ptt b/test/src/skd010-tip.ptt new file mode 100644 index 0000000..4d5eb3e --- /dev/null +++ b/test/src/skd010-tip.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD010: Intel(R) PT FUP May be Dropped After OVF. +; +; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not +; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP +; Packet, Packet Generation Enable). +; +; cpu 6/78 +; cpu 6/94 +; +; Variant: Missing FUP, sync at TIP. +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: psbend() + +; @pt p3: ovf() +; fup missing + +; @pt p4: tip(3: %l0) +l0: nop + +; @pt p5: fup(1: %l1) +; @pt p6: tip.pgd(0: %l2) +l1: nop +l2: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 psbend +;%0p3 ovf +;%0p4 tip 3: %?l0 +;%0p5 fup 1: %?l1.2 +;%0p6 tip.pgd 0: %?l2.0 + + +; @pt .exp(ptxed) +;[overflow] +;%0l0 # nop +;[disabled] diff --git a/test/src/skd010-tip_pgd.ptt b/test/src/skd010-tip_pgd.ptt new file mode 100644 index 0000000..7200581 --- /dev/null +++ b/test/src/skd010-tip_pgd.ptt @@ -0,0 +1,82 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD010: Intel(R) PT FUP May be Dropped After OVF. +; +; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not +; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP +; Packet, Packet Generation Enable). +; +; cpu 6/78 +; cpu 6/94 +; +; Variant: Missing FUP, sync at TIP.PGD. 
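The SKD010 variants all exercise the same recovery: when the FUP that should follow an OVF was dropped, decode resumes at the next packet that carries an IP. A minimal sketch of that scan, using hypothetical (kind, ip) tuples rather than the libipt API:

# After an OVF with a dropped FUP, resume at the next IP-carrying packet, as
# the expectations above resync at FUP, TIP, TIP.PGE or the FUP inside a PSB+.
IP_PACKETS = {"fup", "tip", "tip.pge", "tip.pgd"}

def resync_after_ovf(packets, ovf_index):
    for i in range(ovf_index + 1, len(packets)):
        kind, ip = packets[i]
        if kind in IP_PACKETS and ip is not None:
            return i, ip
    raise RuntimeError("no IP packet after overflow")

# skd010-tip: psb+, ovf with missing fup, then tip(3: l0) provides the IP.
trace = [("psb", None), ("mode.exec", None), ("psbend", None),
         ("ovf", None), ("tip", 0x1000)]
print(resync_after_ovf(trace, 3))  # -> (4, 4096), i.e. decode resumes at l0
# (in the tip_pgd variant the resync packet also disables tracing, so ptxed
# only prints [enabled] at the later tip.pge)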
+; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: hlt + +; @pt p4: ovf() +; fup missing + +; @pt p5: tip.pgd(0: %l1) +l1: hlt + +; We need to re-enable tracing in order to get the overflow indication +; at the enable instruction. +; +; @pt p6: tip.pge(3: %l2) +l2: nop + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) +l3: nop +l4: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 ovf +;%0p5 tip.pgd 0: %?l1.0 +;%0p6 tip.pge 3: %?l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;[overflow] +;[enabled] +;%0l2 +;[disabled] diff --git a/test/src/skd022.ptt b/test/src/skd022.ptt new file mode 100644 index 0000000..3526dde --- /dev/null +++ b/test/src/skd022.ptt @@ -0,0 +1,79 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; SKD022: VM Entry That Clears TraceEn May Generate a FUP. +; +; If VM entry clears Intel(R) PT (Intel Processor Trace) +; IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a +; FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet, +; Packet Generation Disable). VM entry can clear TraceEn if the +; VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR. 
+; +; cpu 6/78 +; cpu 6/94 +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: vmlaunch + +; @pt p4: fup(1: %l0) +; @pt p5: tip.pgd(0: %l1) +l1: hlt + +; @pt p6: tip.pge(3: %l2) +l2: nop + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) +l3: vmresume +l4: hlt + + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l0.2 +;%0p5 tip.pgd 0: %?l1.0 +;%0p6 tip.pge 3: %?l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;%0l0 # vmlaunch +;[disabled] +;[enabled] +;%0l2 # nop +;%0l3 # vmresume +;[disabled] diff --git a/test/src/syscall-sysret-cpl_0.ptt b/test/src/syscall-sysret-cpl_0.ptt new file mode 100644 index 0000000..bcc4471 --- /dev/null +++ b/test/src/syscall-sysret-cpl_0.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSCALL followed by SYSRET are decoded correctly. +; +; Variant: cpl 3 filtered out +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() + +; @pt p4: tip.pge(3: %l5) + +l1: syscall +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: sysret +l7: hlt + +; @pt p5: tip.pgd(0: %l2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 tip.pge 3: %0l5 +;%0p5 tip.pgd 0: %?l2.0 + +; @pt .exp(ptxed) +;[enabled] +;%0l5 # nop +;%0l6 # sysret +;[disabled] diff --git a/test/src/syscall-sysret-cpl_3.ptt b/test/src/syscall-sysret-cpl_3.ptt new file mode 100644 index 0000000..ec1b93c --- /dev/null +++ b/test/src/syscall-sysret-cpl_3.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSCALL followed by SYSRET are decoded correctly. +; +; Variant: cpl 0 filtered out +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: tip.pgd(0: %l5) + +l1: syscall +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: sysret +l7: hlt + +; @pt p6: tip.pge(3: %l2) + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l5.0 +;%0p6 tip.pge 3: %?l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l1 # syscall +;[disabled] +;[resumed] +;%0l2 # nop +;[disabled] diff --git a/test/src/syscall-sysret.ptt b/test/src/syscall-sysret.ptt new file mode 100644 index 0000000..23ad21f --- /dev/null +++ b/test/src/syscall-sysret.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSCALL followed by SYSRET are decoded correctly. +; +; Variant: no cpl filtering +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: tip(3: %l5) + +l1: syscall +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: sysret +l7: hlt + +; @pt p6: tip(3: %l2) + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 3: %0l5 +;%0p6 tip 3: %0l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l1 # syscall +;%0l5 # nop +;%0l6 # sysret +;%0l2 # nop +;[disabled] diff --git a/test/src/sysenter-sysexit-cpl_0.ptt b/test/src/sysenter-sysexit-cpl_0.ptt new file mode 100644 index 0000000..587039b --- /dev/null +++ b/test/src/sysenter-sysexit-cpl_0.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSENTER followed by SYSEXIT are decoded correctly. 
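In the unfiltered syscall-sysret variant above, SYSCALL and SYSRET are far transfers, so the decoder takes each target from the next TIP packet (tip 3: %l5, then tip 3: %l2). A minimal sketch of that lookup, with hypothetical data structures rather than the libipt API:

from collections import deque

# Hypothetical instruction map: addr -> (mnemonic, fall-through successor).
# Far transfers have no static successor; their target comes from a TIP.
# l2 is given no successor only to keep the sketch short; in the test,
# tracing is disabled at l3 via FUP + TIP.PGD.
insns = {"l1": ("syscall", None), "l5": ("nop", "l6"),
         "l6": ("sysret", None), "l2": ("nop", None)}

def walk(insns, tip_targets, start):
    tips, addr, path = deque(tip_targets), start, []
    while True:
        mnemonic, nxt = insns[addr]
        path.append((addr, mnemonic))
        if nxt is not None:
            addr = nxt
        elif tips:
            addr = tips.popleft()   # far transfer: consume the next TIP
        else:
            break                   # no more trace: tracing was disabled
    return path

print(walk(insns, ["l5", "l2"], "l1"))
# -> l1 syscall, l5 nop, l6 sysret, l2 nop, matching the ptxed expectation.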
+; +; Variant: cpl 3 filtered out +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() + +; @pt p4: tip.pge(3: %l5) + +l1: db 0x0f, 0x34 ; sysenter +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: db 0x0f, 0x35 ; sysexit +l7: hlt + +; @pt p5: tip.pgd(0: %l2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 tip.pge 3: %0l5 +;%0p5 tip.pgd 0: %?l2.0 + +; @pt .exp(ptxed) +;[enabled] +;%0l5 # nop +;%0l6 # sysexit +;[disabled] diff --git a/test/src/sysenter-sysexit-cpl_3.ptt b/test/src/sysenter-sysexit-cpl_3.ptt new file mode 100644 index 0000000..455eb06 --- /dev/null +++ b/test/src/sysenter-sysexit-cpl_3.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSENTER followed by SYSEXIT are decoded correctly. +; +; Variant: cpl 0 filtered out +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: tip.pgd(0: %l5) + +l1: db 0x0f, 0x34 ; sysenter +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: db 0x0f, 0x35 ; sysexit +l7: hlt + +; @pt p6: tip.pge(3: %l2) + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l5.0 +;%0p6 tip.pge 3: %?l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l1 # sysenter +;[disabled] +;[resumed] +;%0l2 # nop +;[disabled] diff --git a/test/src/sysenter-sysexit.ptt b/test/src/sysenter-sysexit.ptt new file mode 100644 index 0000000..7d0c095 --- /dev/null +++ b/test/src/sysenter-sysexit.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that SYSENTER followed by SYSEXIT are decoded correctly. +; +; Variant: no cpl filtering +; + +org 0x100000 +bits 64 +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +; @pt p5: tip(3: %l5) + +l1: db 0x0f, 0x34 ; sysenter +l2: nop +l3: nop +l4: hlt + +l5: nop +l6: db 0x0f, 0x35 ; sysexit +l7: hlt + +; @pt p6: tip(3: %l2) + +; @pt p7: fup(1: %l3) +; @pt p8: tip.pgd(0: %l4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip 3: %0l5 +;%0p6 tip 3: %0l2 +;%0p7 fup 1: %?l3.2 +;%0p8 tip.pgd 0: %?l4.0 + +; @pt .exp(ptxed) +;%0l1 # sysenter +;%0l5 # nop +;%0l6 # sysexit +;%0l2 # nop +;[disabled] diff --git a/test/src/tip-eos.ptt b/test/src/tip-eos.ptt new file mode 100644 index 0000000..c43b619 --- /dev/null +++ b/test/src/tip-eos.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we indicate the end of the trace without a TIP.PGD. +; +; Variant: the trace ends after an indirect branch +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: jmp rax +l1: hlt + +; @pt p4:tip(3: %l2) +l2: hlt + + +; @pt .exp(ptxed) +;%0l0 +;[end of trace] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 tip 3: %0l2 diff --git a/test/src/tip_pgd-direct_call.ptt b/test/src/tip_pgd-direct_call.ptt new file mode 100644 index 0000000..f4eb968 --- /dev/null +++ b/test/src/tip_pgd-direct_call.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next direct branch (call in this case) +; whose target matches the TIP.PGD payload. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: call l3 +l2: hlt +l3: call l5 +l4: hlt + +; @pt p5: tip.pgd(3: %l5) +l5: nop + + +; @pt .exp(ptxed) +;%0l1 # call l3 +;%0l3 # call l5 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 3: %0l5 diff --git a/test/src/tip_pgd-direct_jump.ptt b/test/src/tip_pgd-direct_jump.ptt new file mode 100644 index 0000000..3f81655 --- /dev/null +++ b/test/src/tip_pgd-direct_jump.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next direct branch (jump in this case) +; whose target matches the TIP.PGD payload. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jmp l3 +l2: hlt +l3: jmp l5 +l4: hlt + +; @pt p5: tip.pgd(3: %l5) +l5: nop + + +; @pt .exp(ptxed) +;%0l1 # jmp l3 +;%0l3 # jmp l5 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 3: %0l5 diff --git a/test/src/tip_pgd-indirect_call.ptt b/test/src/tip_pgd-indirect_call.ptt new file mode 100644 index 0000000..e943f8f --- /dev/null +++ b/test/src/tip_pgd-indirect_call.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next branch (call in this case) that +; would normally generate a TIP packet. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: call l3 +l2: hlt +l3: call rax +l4: hlt + +; @pt p5: tip.pgd(3: %l5) +l5: nop + + +; @pt .exp(ptxed) +;%0l1 # call l3 +;%0l3 # call rax +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 3: %0l5 diff --git a/test/src/tip_pgd-indirect_jump.ptt b/test/src/tip_pgd-indirect_jump.ptt new file mode 100644 index 0000000..4644dcb --- /dev/null +++ b/test/src/tip_pgd-indirect_jump.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next branch (jump in this case) that +; would normally generate a TIP packet. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jmp l3 +l2: hlt +l3: jmp rax +l4: hlt + +; @pt p5: tip.pgd(3: %l5) +l5: nop + + +; @pt .exp(ptxed) +;%0l1 # jmp l3 +;%0l3 # jmp rax +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 3: %0l5 diff --git a/test/src/tip_pgd-pip-tip_pge.ptt b/test/src/tip_pgd-pip-tip_pge.ptt new file mode 100644 index 0000000..8a040bf --- /dev/null +++ b/test/src/tip_pgd-pip-tip_pge.ptt @@ -0,0 +1,70 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that a PIP is processed while tracing is disabled. +; +; Variant: disable during normal tracing. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: syscall + +; @pt p5: tip.pgd(0: %l2) +l2: hlt + +; @pt p6: pip(0xa00) +; @pt p7: tip.pge(3: %l3) +l3: nop + +l4: nop +l5: hlt +; @pt p8: fup(1: %l4) +; @pt p9: tip.pgd(0: %l5) + + +; @pt .exp(ptxed) +;%0l1 # syscall +;[disabled] +;[enabled] +;%0l3 # nop +;[disabled] + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %?l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l2.0 +;%0p6 pip a00 cr3 0000000000000a00 +;%0p7 tip.pge 3: %?l3 +;%0p8 fup 1: %?l4.2 +;%0p9 tip.pgd 0: %?l5.0 diff --git a/test/src/tip_pgd-psb-stop.ptt b/test/src/tip_pgd-psb-stop.ptt new file mode 100644 index 0000000..d11b619 --- /dev/null +++ b/test/src/tip_pgd-psb-stop.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD. +; +; Variant: encountered PSB+ between TIP.PGD and TraceStop +; + +org 0x100000 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop + + +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l3) +l2: nop +l3: hlt + +; @pt p7: psb() +; @pt p8: psbend() +; @pt p9: stop() + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] +;[stopped] + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l3.0 +;%0p7 psb +;%0p8 psbend +;%0p9 stop diff --git a/test/src/tip_pgd-stop.ptt b/test/src/tip_pgd-stop.ptt new file mode 100644 index 0000000..4017ab1 --- /dev/null +++ b/test/src/tip_pgd-stop.ptt @@ -0,0 +1,59 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+ +; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD. +; +; Variant: encountered during normal tracing. +; + +org 0x100000 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: psbend() +l1: nop + + +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l3) +; @pt p7: stop() +l2: nop +l3: hlt + +; @pt .exp(ptxed) +;%0l1 # nop +;[disabled] +;[stopped] + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %0l1 +;%0p4 psbend +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l3.0 +;%0p7 stop diff --git a/test/src/tip_pgd-tnt_not_taken.ptt b/test/src/tip_pgd-tnt_not_taken.ptt new file mode 100644 index 0000000..98e89cf --- /dev/null +++ b/test/src/tip_pgd-tnt_not_taken.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next branch that would normally +; generate a TNT packet. +; +; Variant: disable on not taken. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jle l3 +; @pt p5: tnt(t) +l2: hlt +l3: jle l5 +l4: nop +l5: hlt +; @pt p6: tip.pgd(3: %l4) + + +; @pt .exp(ptxed) +;%0l1 # jle l3 +;%0l3 # jle l5 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 ! +;%0p6 tip.pgd 3: %0l4 diff --git a/test/src/tip_pgd-tnt_taken.ptt b/test/src/tip_pgd-tnt_taken.ptt new file mode 100644 index 0000000..28d1d39 --- /dev/null +++ b/test/src/tip_pgd-tnt_taken.ptt @@ -0,0 +1,61 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD is applied to the next branch that would normally +; generate a TNT packet. +; +; Variant: disable on taken. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jle l3 +; @pt p5: tnt(t) +l2: hlt +l3: jle l5 +l4: hlt +l5: nop +; @pt p6: tip.pgd(3: %l5) + + +; @pt .exp(ptxed) +;%0l1 # jle l3 +;%0l3 # jle l5 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 ! +;%0p6 tip.pgd 3: %0l5 diff --git a/test/src/tip_pgd-tsx.ptt b/test/src/tip_pgd-tsx.ptt new file mode 100644 index 0000000..0593306 --- /dev/null +++ b/test/src/tip_pgd-tsx.ptt @@ -0,0 +1,76 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TSX state is applied correctly when branch tracing is disabled. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() + +; @pt p4: mode.tsx(begin) +; @pt p5: tip.pge(3: %l1) +l1: nop + +; @pt p6: fup(1: %l2) +; @pt p7: tip.pgd(0: %l3) +l2: nop +l3: hlt + +; @pt p8: mode.tsx(abort) +; @pt p9: tip.pge(3: %l4) +l4: nop + +; @pt p10: fup(1: %l5) +; @pt p11: tip.pgd(0: %l6) +l5: nop +l6: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 mode.tsx intx +;%0p5 tip.pge 3: %?l1 +;%0p6 fup 1: %?l2.2 +;%0p7 tip.pgd 0: %?l3.0 +;%0p8 mode.tsx abrt +;%0p9 tip.pge 3: %?l4 +;%0p10 fup 1: %?l5.2 +;%0p11 tip.pgd 0: %?l6.0 + + +; @pt .exp(ptxed) +;[enabled] +;? %0l1 # nop +;[disabled] +;[enabled] +;%0l4 # nop +;[disabled] diff --git a/test/src/tip_pgd_noip-far_jump.ptt b/test/src/tip_pgd_noip-far_jump.ptt new file mode 100644 index 0000000..59fffee --- /dev/null +++ b/test/src/tip_pgd_noip-far_jump.ptt @@ -0,0 +1,54 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD with suppressed IP payload is applied to the next far branch +; (far jump in this case). +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jmp far [rax] ; l3 +l2: hlt +; @pt p5: tip.pgd(0: %l3) + +l3: hlt + +; @pt .exp(ptxed) +;%0l1 # jmp far [rax] ; l3 +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l3.0 diff --git a/test/src/tip_pgd_noip-mov_cr3.ptt b/test/src/tip_pgd_noip-mov_cr3.ptt new file mode 100644 index 0000000..323d0f2 --- /dev/null +++ b/test/src/tip_pgd_noip-mov_cr3.ptt @@ -0,0 +1,54 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
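In the tip_pgd-tsx.ptt expectations just above, the leading '?' on the l1 line marks that nop as speculative: it was executed between mode.tsx(begin) and mode.tsx(abort), so it may never commit, while l4 after the abort is printed without the marker. This is my reading of the ptxed output convention; a client can make the same distinction from the decoded instruction, assuming the libipt v1.x struct pt_insn flag bits (speculative in particular) used below.

#include <intel-pt.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch: print one decoded instruction roughly the way the .exp(ptxed)
 * lines read, prefixing speculatively executed (in-transaction)
 * instructions with "? ".  Assumes the v1.x pt_insn bit-fields.
 */
void print_insn(const struct pt_insn *insn)
{
        if (insn->speculative)
                printf("? ");

        printf("%016" PRIx64 "\n", insn->ip);
}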
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD with suppressed IP payload is applied to the next +; MOV CR3 instruction. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() + +l1: mov cr3, rax +l2: hlt +; @pt p5: tip.pgd(0: %l2) + + +; @pt .exp(ptxed) +;%0l1 # mov cr3, rax +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tip.pgd 0: %?l2.0 diff --git a/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt b/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt new file mode 100644 index 0000000..ebb51ee --- /dev/null +++ b/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test a combination of enable and async disable on the same IP. 
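The tip_pgd_noip variants above (far jump, MOV CR3, and later sysret) hinge on a TIP.PGD whose IP payload is suppressed: the packet does not say where tracing was disabled, so the decoder binds the disable to the next operation that would have produced a TIP packet. At the query layer this shows up as a ptev_disabled event with the ip_suppressed flag set. A rough sketch of such an event loop, using the libipt query decoder as I understand its v1.x API:

#include <intel-pt.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch: drain pending events from a query decoder and report disables.
 * @decoder is an already synchronized query decoder, @status the status
 * bit-vector returned by the previous pt_qry_* call.
 */
int drain_events(struct pt_query_decoder *decoder, int status)
{
        while (status & pts_event_pending) {
                struct pt_event event;

                status = pt_qry_event(decoder, &event, sizeof(event));
                if (status < 0)
                        return status;

                if (event.type == ptev_disabled) {
                        if (event.ip_suppressed)
                                /* TIP.PGD without IP: applies to the next
                                 * TIP-generating operation (far branch,
                                 * MOV CR3, ...).
                                 */
                                printf("[disabled]\n");
                        else
                                printf("[disabled at %" PRIx64 "]\n",
                                       event.variant.disabled.ip);
                }
        }

        return status;
}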
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() +; @pt p4: tip.pge(3: %l1) +l1: nop +; @pt p5: fup(1: %l1) +; @pt p6: tip.pgd(0: %l1) +; @pt p7: tip.pge(3: %l1) +l2: nop +l3: nop +; @pt p8: fup(1: %l3) +; @pt p9: tip.pgd(0: %l4) +l4: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 tip.pge 3: %0l1 +;%0p5 fup 1: %?l1.2 +;%0p6 tip.pgd 0: %?l1.0 +;%0p7 tip.pge 3: %0l1 +;%0p8 fup 1: %?l3.2 +;%0p9 tip.pgd 0: %?l4.0 + + +; @pt .exp(ptxed) +;[enabled] +;%0l1 # nop +;%0l2 # nop +;[disabled] diff --git a/test/src/tip_pge-fup-tip_pgd.ptt b/test/src/tip_pge-fup-tip_pgd.ptt new file mode 100644 index 0000000..a303282 --- /dev/null +++ b/test/src/tip_pge-fup-tip_pgd.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test enable and async disable around a single instruction. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: psbend() +; @pt p4: tip.pge(3: %l1) +l1: nop +l2: nop +; @pt p5: fup(1: %l2) +; @pt p6: tip.pgd(0: %l3) +l3: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 psbend +;%0p4 tip.pge 3: %0l1 +;%0p5 fup 1: %?l2.2 +;%0p6 tip.pgd 0: %?l3.0 + + +; @pt .exp(ptxed) +;[enabled] +;%0l1 # nop +;[disabled] diff --git a/test/src/tnt-tip_pgd_noip-sysret.ptt b/test/src/tnt-tip_pgd_noip-sysret.ptt new file mode 100644 index 0000000..eee4a6f --- /dev/null +++ b/test/src/tnt-tip_pgd_noip-sysret.ptt @@ -0,0 +1,64 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TIP.PGD with suppressed IP payload is applied to the next far branch +; (sysret in this case). +; +; Variant: consume a TNT before to test that the disable event is not +; applied too early. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: jle l3 +; @pt p5: tnt(tt) +l2: hlt +l3: jle l5 +l4: hlt +l5: sysret +l6: hlt +; @pt p6: tip.pgd(0: %l6) + + +; @pt .exp(ptxed) +;%0l1 # jle l3 +;%0l3 # jle l5 +;%0l5 # sysret +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 tnt.8 !! +;%0p6 tip.pgd 0: %?l6.0 diff --git a/test/src/tnt_n-eos.ptt b/test/src/tnt_n-eos.ptt new file mode 100644 index 0000000..2103c1d --- /dev/null +++ b/test/src/tnt_n-eos.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we indicate the end of the trace without a TIP.PGD. 
+; +; Variant: the trace ends after a non-taken conditional branch +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: je l2 +l1: hlt + +; @pt p4:tnt(n) +l2: hlt + + +; @pt .exp(ptxed) +;%0l0 +;[end of trace] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 tnt.8 . diff --git a/test/src/tnt_t-eos.ptt b/test/src/tnt_t-eos.ptt new file mode 100644 index 0000000..1469672 --- /dev/null +++ b/test/src/tnt_t-eos.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that we indicate the end of the trace without a TIP.PGD. +; +; Variant: the trace ends after a taken conditional branch +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: je l2 +l1: hlt + +; @pt p4:tnt(t) +l2: hlt + + +; @pt .exp(ptxed) +;%0l0 +;[end of trace] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %0l0 +;%0p3 psbend +;%0p4 tnt.8 ! diff --git a/test/src/truncated.ptt b/test/src/truncated.ptt new file mode 100644 index 0000000..f2be373 --- /dev/null +++ b/test/src/truncated.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. 
+; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test an instruction crossing section boundaries. +; +; opt:ptxed --raw truncated.bin:0x2:0x1002 +; + +org 0x1000 +bits 64 + +; @pt p0: psb() +; @pt p1: mode.exec(64bit) +; @pt p2: fup(3: %l0) +; @pt p3: psbend() +l0: nop + +l1: jmp l3 +l2: hlt + +l3: nop + +; @pt p4: fup(1: %l4) +; @pt p5: tip.pgd(0: %l4) +l4: hlt + + +; @pt .exp(ptxed) +;%0l0 # nop +;%0l1 # jmp l3 +;%0l3 # nop +;[disabled] + +; @pt .exp(ptdump) +;%0p0 psb +;%0p1 mode.exec cs.l +;%0p2 fup 3: %?l0 +;%0p3 psbend +;%0p4 fup 1: %?l4.2 +;%0p5 tip.pgd 0: %?l4.0 diff --git a/test/src/tsc-cbr-cyc-tsc.ptt b/test/src/tsc-cbr-cyc-tsc.ptt new file mode 100644 index 0000000..9147c49 --- /dev/null +++ b/test/src/tsc-cbr-cyc-tsc.ptt @@ -0,0 +1,57 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test CYC-based TSC estimation. 
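The truncated.ptt test just above exercises an instruction that crosses a section boundary; I read its option line, ptxed --raw truncated.bin:0x2:0x1002, as mapping part of the compiled binary at a second address so that the decoder has to stitch the instruction together across sections, but I have not verified this version's exact --raw syntax. Independent of the option parsing, the underlying setup can be expressed through the image API; the sketch below uses pt_image_add_file, and the offsets are only meant to mirror the numbers in the option string.

#include <intel-pt.h>
#include <stdint.h>

/* Sketch: register two overlapping sections of the same file so that an
 * instruction may start in one section and continue in the next.
 * @filename would be the assembled test binary, @size its file size.
 */
struct pt_image *build_image(const char *filename, uint64_t size)
{
        struct pt_image *image;
        int status;

        image = pt_image_alloc("truncated");
        if (!image)
                return NULL;

        /* The full binary at the test's org address. */
        status = pt_image_add_file(image, filename, 0x0, size, NULL, 0x1000);
        if (status < 0)
                goto fail;

        /* A second mapping of the same file, starting at file offset 0x2. */
        status = pt_image_add_file(image, filename, 0x2, size - 0x2,
                                   NULL, 0x1002);
        if (status < 0)
                goto fail;

        return image;

fail:
        pt_image_free(image);
        return NULL;
}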
+; +; Variant: CBR-based calibration, time correction on TSC +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: tsc(0xa0000) +; @pt p3: cbr(0x2) +; @pt p4: psbend() + +; @pt p5: cyc(0x3) +; @pt p6: cyc(0x1) + +; @pt p7: tsc(0xa0007) +; @pt p8: cyc(0x2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 tsc a0000 tsc +a0000 +;%0p3 cbr 2 +;%0p4 psbend +;%0p5 cyc 3 tsc +6 +;%0p6 cyc 1 tsc +2 +;%0p7 tsc a0007 tsc -1 +;%0p8 cyc 2 tsc +4 diff --git a/test/src/tsc-cyc_calibrate.ptt b/test/src/tsc-cyc_calibrate.ptt new file mode 100644 index 0000000..770a848 --- /dev/null +++ b/test/src/tsc-cyc_calibrate.ptt @@ -0,0 +1,69 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test CYC-based TSC estimation. 
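A quick way to sanity-check the tsc-cbr-cyc-tsc.ptt expectations above: with --nom-freq 4 and cbr 2 the deltas are consistent with each CYC cycle being worth nom_freq / cbr = 2 TSC ticks, and the tsc(0xa0007) packet then corrects the running estimate (0xa0000 + 6 + 2 = 0xa0008) down by one tick. This is a back-of-the-envelope reading of the .exp lines, not a restatement of libipt's calibration code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reproduce the ptdump --time-delta values of tsc-cbr-cyc-tsc.ptt under the
 * assumption that one CYC cycle corresponds to nom_freq / cbr TSC ticks.
 */
int main(void)
{
        const uint64_t nom_freq = 4, cbr = 2;
        const uint64_t tsc = 0xa0000;              /* p2: tsc a0000        */

        uint64_t d5 = 0x3 * nom_freq / cbr;        /* p5: cyc 3 -> tsc +6  */
        uint64_t d6 = 0x1 * nom_freq / cbr;        /* p6: cyc 1 -> tsc +2  */
        uint64_t est = tsc + d5 + d6;              /* estimate: 0xa0008    */

        int64_t d7 = (int64_t) 0xa0007 - (int64_t) est; /* p7: tsc -> -1   */
        uint64_t d8 = 0x2 * nom_freq / cbr;        /* p8: cyc 2 -> tsc +4  */

        printf("+%" PRIx64 " +%" PRIx64 " %+" PRId64 " +%" PRIx64 "\n",
               d5, d6, d7, d8);
        return 0;
}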
+; +; Variant: TSC-based calibration +; +; opt:ptdump --time --time-delta + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: tsc(0xa0000) +; @pt p3: psbend() + +; @pt p4: cyc(0x100) +; @pt p5: tsc(0xa0200) +; @pt p6: cyc(0x100) + +; @pt p7: psb() +; @pt p8: tsc(0xa0300) +; @pt p9: psbend() + +; @pt p10: cyc(0x100) +; @pt p11: tsc(0xa0600) +; @pt p12: cyc(0x100) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 tsc a0000 tsc +a0000 +;%0p3 psbend +;[%p4: calibration error: no timing information] +;[%p4: error updating time: no calibration] +;%0p4 cyc 100 tsc +0 +;%0p5 tsc a0200 tsc +200 +;[%p6: calibration error: no timing information] +;[%p6: error updating time: no calibration] +;%0p6 cyc 100 tsc +0 +;%0p7 psb +;%0p8 tsc a0300 tsc +100 +;%0p9 psbend +;%0p10 cyc 100 tsc +100 +;%0p11 tsc a0600 tsc +200 +;%0p12 cyc 100 tsc +100 diff --git a/test/src/tsc-mtc-tma-mtc.ptt b/test/src/tsc-mtc-tma-mtc.ptt new file mode 100644 index 0000000..ac89116 --- /dev/null +++ b/test/src/tsc-mtc-tma-mtc.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. 
+; +; Variant: MTC between TSC and TMA are ignored +; +; opt:ptdump --time --time-delta --no-tcal +; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: mtc(0xc1) +; @pt p5: tma(0xc2d2, 0xe) +; @pt p6: mtc(0xc3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 mtc c1 tsc +0 +;%0p5 tma c2d2, e tsc +0 +;%0p6 mtc c3 tsc +aa diff --git a/test/src/tsc-tma-cbr-cyc-mtc.ptt b/test/src/tsc-tma-cbr-cyc-mtc.ptt new file mode 100644 index 0000000..6eae590 --- /dev/null +++ b/test/src/tsc-tma-cbr-cyc-mtc.ptt @@ -0,0 +1,57 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: CBR-based calibration, +; CYC between TMA and MTC, time corrected on MTC +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x12, 0x4) +; @pt p5: cbr(0x2) +; @pt p6: cyc(0x3) +; @pt p7: cyc(0x1) +; @pt p8: mtc(0x2) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 12, 4 tsc +0 +;%0p5 cbr 2 +;%0p6 cyc 3 tsc +6 +;%0p7 cyc 1 tsc +2 +;%0p8 mtc 2 tsc -5 diff --git a/test/src/tsc-tma-cbr-cyc.ptt b/test/src/tsc-tma-cbr-cyc.ptt new file mode 100644 index 0000000..a81d558 --- /dev/null +++ b/test/src/tsc-tma-cbr-cyc.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
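The +aa on the final MTC in tsc-mtc-tma-mtc.ptt above follows from the configured clock ratio: --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 makes one CTC tick worth 4 TSC ticks, --mtc-freq 8 makes the MTC payload the CTC value shifted right by 8, and the TMA packet pins the current CTC at 0xc2d2 with a fast-counter remainder of 0xe. The MTC with payload 0xc3 is reached at CTC 0xc300, i.e. 0x2e CTC later, which is 0xb8 TSC minus the 0xe remainder, hence +aa. The small program below checks that arithmetic; it is a reading of the expected output, so the exact role of the fast counter is an assumption on my part.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reproduce the "mtc c3 tsc +aa" delta of tsc-mtc-tma-mtc.ptt.
 * Assumed reading: TSC/CTC ratio = ebx/eax, MTC payload = ctc >> mtc_freq,
 * TMA fast counter = TSC ticks already elapsed past the last CTC edge.
 */
int main(void)
{
        const uint64_t eax = 1, ebx = 4, mtc_freq = 8;
        const uint64_t tma_ctc = 0xc2d2, tma_fc = 0xe;
        const uint64_t mtc_payload = 0xc3;

        uint64_t mtc_ctc = mtc_payload << mtc_freq;        /* 0xc300 */
        uint64_t ctc_delta = mtc_ctc - tma_ctc;            /* 0x2e   */
        uint64_t tsc_delta = ctc_delta * ebx / eax - tma_fc;

        printf("tsc +%" PRIx64 "\n", tsc_delta);           /* tsc +aa */
        return 0;
}

The same reading explains the -5 in tsc-tma-cbr-cyc-mtc.ptt above, where with eax 2/ebx 1 and mtc-freq 4 the MTC with payload 2 sits (0x20 - 0x12) / 2 - 4 = 3 TSC after the TSC packet while the two CYC packets had already advanced the estimate by 8, and the +3f0 that appears further down in tsc-tma-cbr-mtc-cyc.ptt, where (0x200 - 0x102) * 4 - 0x8 = 0x3f0.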
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: CBR-based calibration, +; CYC between TMA and MTC (not shown) +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x102, 0x8) +; @pt p5: cbr(0x2) +; @pt p6: cyc(0x3) +; @pt p7: cyc(0x1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 102, 8 tsc +0 +;%0p5 cbr 2 +;%0p6 cyc 3 tsc +6 +;%0p7 cyc 1 tsc +2 diff --git a/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt new file mode 100644 index 0000000..ad345ef --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. 
+; +; Variant: CBR-based calibration, time correction on MTC +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xf01, 0x1) +; @pt p5: cbr(0x2) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x3) +; @pt p8: cyc(0x1) +; @pt p9: mtc(0x3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma f01, 1 tsc +0 +;%0p5 cbr 2 +;%0p6 mtc 2 tsc +3 +;%0p7 cyc 3 tsc +6 +;%0p8 cyc 1 tsc +2 +;%0p9 mtc 3 tsc -4 diff --git a/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt new file mode 100644 index 0000000..0962cf5 --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: Ignore CYC packets. +; +; opt:ptdump --time --time-delta --no-cyc +; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xf01, 0x1) +; @pt p5: cbr(0x2) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x3) +; @pt p8: cyc(0x1) +; @pt p9: mtc(0x3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma f01, 1 tsc +0 +;%0p5 cbr 2 +;%0p6 mtc 2 tsc +3 +;%0p9 mtc 3 tsc +4 diff --git a/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt new file mode 100644 index 0000000..395a493 --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt @@ -0,0 +1,58 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
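The deltas in tsc-tma-cbr-mtc-cyc-mtc.ptt above combine both mechanisms: eax 1/ebx 4 makes one CTC worth 4 TSC, --mtc-freq 0 makes the MTC payload the raw low CTC byte, and --nom-freq 4 with cbr 2 makes one CYC cycle worth 2 TSC. Reading tma(0xf01, 0x1) as CTC 0xf01 with fast counter 1, mtc 2 lands one CTC later (+3 after the fast-counter correction), the two CYC packets add +6 and +2, and mtc 3, only one CTC after mtc 2, pulls the overshooting CYC estimate back by -4. The same numbers give the +4 of the --no-cyc variant (tsc-tma-cbr-mtc-cyc-no_cyc.ptt), where the ignored CYC packets never advance the estimate, and the -3 of tsc-tma-cbr-mtc-cyc-tsc.ptt, where the closing tsc(0xa0008) corrects an estimate of 0xa000b. Again a reading of the .exp lines, checked below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Reproduce the ptdump --time-delta values of tsc-tma-cbr-mtc-cyc-mtc.ptt. */
int main(void)
{
        const int64_t eax = 1, ebx = 4;            /* 1 CTC = 4 TSC        */
        const int64_t nom_freq = 4, cbr = 2;       /* 1 CYC = 2 TSC        */
        const int64_t tma_fc = 0x1;

        int64_t d_mtc2 = 1 * ebx / eax - tma_fc;          /* p6: mtc 2 -> +3 */
        int64_t d_cyc3 = 3 * nom_freq / cbr;              /* p7: cyc 3 -> +6 */
        int64_t d_cyc1 = 1 * nom_freq / cbr;              /* p8: cyc 1 -> +2 */

        /* mtc 3 is one CTC after mtc 2; the CYC estimate overshot by 8 - 4. */
        int64_t d_mtc3 = 1 * ebx / eax - (d_cyc3 + d_cyc1); /* p9: mtc 3 -> -4 */

        printf("%+" PRId64 " %+" PRId64 " %+" PRId64 " %+" PRId64 "\n",
               d_mtc2, d_cyc3, d_cyc1, d_mtc3);
        return 0;
}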
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: CBR-based calibration, time correction on TSC +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xf01, 0x1) +; @pt p5: cbr(0x2) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x3) +; @pt p8: cyc(0x1) +; @pt p9: tsc(0xa0008) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma f01, 1 tsc +0 +;%0p5 cbr 2 +;%0p6 mtc 2 tsc +3 +;%0p7 cyc 3 tsc +6 +;%0p8 cyc 1 tsc +2 +;%0p9 tsc a0008 tsc -3 diff --git a/test/src/tsc-tma-cbr-mtc-cyc.ptt b/test/src/tsc-tma-cbr-mtc-cyc.ptt new file mode 100644 index 0000000..0bf9689 --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-cyc.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: CBR-based calibration +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x102, 0x8) +; @pt p5: cbr(0x2) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x3) +; @pt p8: cyc(0x1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 102, 8 tsc +0 +;%0p5 cbr 2 +;%0p6 mtc 2 tsc +3f0 +;%0p7 cyc 3 tsc +6 +;%0p8 cyc 1 tsc +2 diff --git a/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt new file mode 100644 index 0000000..1c81c2f --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt @@ -0,0 +1,60 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. 
+; +; Variant: CBR-based calibration, correct using MTC-based calibration +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x18, 0x8) +; @pt p5: cbr(0x2) +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x100) +; @pt p8: mtc(0x3) +; @pt p9: cyc(0x100) +; @pt p10: mtc(0x4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 18, 8 tsc +0 +;%0p5 cbr 2 +;%0p6 mtc 2 tsc +18 +;%0p7 cyc 100 tsc +200 +;%0p8 mtc 3 tsc -1c0 +;%0p9 cyc 100 tsc +40 +;%0p10 mtc 4 tsc +0 diff --git a/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt b/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt new file mode 100644 index 0000000..50d8c54 --- /dev/null +++ b/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. 
+; +; Variant: CBR-based calibration +; High CYC threshold resulting in no CYC between MTCs +; +; opt:ptdump --time --time-delta +; opt:ptdump --mtc-freq 4 --nom-freq 1 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: tsc(0xa0000) +; @pt p3: tma(0x18, 0x8) +; @pt p4: cbr(2) +; @pt p5: psbend() + +; @pt p6: mtc(0x2) +; @pt p7: cyc(0x80) +; @pt p8: mtc(0x3) +; @pt p9: mtc(0x4) +; @pt p10: cyc(0xe0) +; @pt p11: mtc(0x5) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 tsc a0000 tsc +a0000 +;%0p3 tma 18, 8 tsc +0 +;%0p4 cbr 2 +;%0p5 psbend +;%0p6 mtc 2 tsc +18 +;%0p7 cyc 80 tsc +40 +;%0p8 mtc 3 tsc +0 +;%0p9 mtc 4 tsc +40 +;%0p10 cyc e0 tsc +30 +;%0p11 mtc 5 tsc +10 diff --git a/test/src/tsc-tma-cyc.ptt b/test/src/tsc-tma-cyc.ptt new file mode 100644 index 0000000..5a26f27 --- /dev/null +++ b/test/src/tsc-tma-cyc.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: No calibration. +; +; opt:ptdump --time --time-delta +; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x102, 0x8) +; @pt p5: cyc(0x3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 102, 8 tsc +0 +;[%p5: calibration error: no timing information] +;[%p5: error updating time: no calibration] +;%0p5 cyc 3 tsc +0 diff --git a/test/src/tsc-tma-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-mtc-cyc_calibrate.ptt new file mode 100644 index 0000000..fc1dc0c --- /dev/null +++ b/test/src/tsc-tma-mtc-cyc_calibrate.ptt @@ -0,0 +1,60 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: MTC-based calibration +; +; opt:ptdump --time --time-delta +; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x18, 0x8) +; @pt p5: mtc(0x2) +; @pt p6: cyc(0x100) +; @pt p7: mtc(0x3) +; @pt p8: cyc(0x100) +; @pt p9: mtc(0x4) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 18, 8 tsc +0 +;%0p5 mtc 2 tsc +18 +;[%p6: calibration error: no timing information] +;[%p6: error updating time: no calibration] +;%0p6 cyc 100 tsc +0 +;%0p7 mtc 3 tsc +40 +;%0p8 cyc 100 tsc +40 +;%0p9 mtc 4 tsc +0 diff --git a/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt new file mode 100644 index 0000000..ac695eb --- /dev/null +++ b/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt @@ -0,0 +1,63 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. +; +; Variant: MTC-based calibration +; no CYC between MTC +; +; opt:ptdump --time --time-delta +; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x18, 0x8) +; @pt p5: mtc(0x2) +; @pt p6: mtc(0x3) +; @pt p7: cyc(0x100) +; @pt p8: mtc(0x4) +; @pt p9: cyc(0x80) +; @pt p10: mtc(0x5) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 18, 8 tsc +0 +;%0p5 mtc 2 tsc +18 +;%0p6 mtc 3 tsc +40 +;[%p7: calibration error: no timing information] +;[%p7: error updating time: no calibration] +;%0p7 cyc 100 tsc +0 +;%0p8 mtc 4 tsc +40 +;%0p9 cyc 80 tsc +40 +;%0p10 mtc 5 tsc +0 diff --git a/test/src/tsc-tma-mtc-tsc.ptt b/test/src/tsc-tma-mtc-tsc.ptt new file mode 100644 index 0000000..e19a408 --- /dev/null +++ b/test/src/tsc-tma-mtc-tsc.ptt @@ -0,0 +1,54 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. 
+; +; Variant: time correction on TSC +; +; opt:ptdump --time --time-delta --no-tcal +; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xf012, 0x6) +; @pt p5: mtc(0x2) +; @pt p6: mtc(0x3) +; @pt p7: tsc(0xa0008) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma f012, 6 tsc +0 +;%0p5 mtc 2 tsc +1 +;%0p6 mtc 3 tsc +8 +;%0p7 tsc a0008 tsc -1 diff --git a/test/src/tsc-tma-mtc_absolute.ptt b/test/src/tsc-tma-mtc_absolute.ptt new file mode 100644 index 0000000..12dc22f --- /dev/null +++ b/test/src/tsc-tma-mtc_absolute.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: time displayed as absolute number +; +; opt:ptdump --time --no-tcal +; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xff08, 0x1) +; @pt p5: mtc(0x9) +; @pt p6: mtc(0xa) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc 00000000000a0000 +;%0p4 tma ff08, 1 tsc 00000000000a0000 +;%0p5 mtc 9 tsc 00000000000a0000 +;%0p6 mtc a tsc 00000000000a0001 diff --git a/test/src/tsc-tma-mtc_infreq.ptt b/test/src/tsc-tma-mtc_infreq.ptt new file mode 100644 index 0000000..5de9dbe --- /dev/null +++ b/test/src/tsc-tma-mtc_infreq.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: low MTC frequency +; +; the MTC frequency is too low for TMA to provide the full CTC +; estimate the missing bits using the next MTC +; +; opt:ptdump --time --no-tcal +; opt:ptdump --mtc-freq 12 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0018) +; @pt p4: tma(0xe020, 0x8) +; @pt p5: mtc(0xaf) +; @pt p6: mtc(0xb0) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0018 tsc 00000000000a0018 +;%0p4 tma e020, 8 tsc 00000000000a0018 +;%0p5 mtc af tsc 00000000000a0800 +;%0p6 mtc b0 tsc 00000000000a1000 diff --git a/test/src/tsc-tma-mtc_infreq_wrap.ptt b/test/src/tsc-tma-mtc_infreq_wrap.ptt new file mode 100644 index 0000000..0731d5a --- /dev/null +++ b/test/src/tsc-tma-mtc_infreq_wrap.ptt @@ -0,0 +1,55 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. 
+; +; Variant: low MTC frequency, wrap CTC +; +; the MTC frequency is too low for TMA to provide the full CTC +; estimate the missing bits using the next MTC +; +; opt:ptdump --time --no-tcal +; opt:ptdump --mtc-freq 12 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0018) +; @pt p4: tma(0xf020, 0x8) +; @pt p5: mtc(0xa0) +; @pt p6: mtc(0xa1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0018 tsc 00000000000a0018 +;%0p4 tma f020, 8 tsc 00000000000a0018 +;%0p5 mtc a0 tsc 00000000000a0800 +;%0p6 mtc a1 tsc 00000000000a1000 diff --git a/test/src/tsc-tma-mtc_relative.ptt b/test/src/tsc-tma-mtc_relative.ptt new file mode 100644 index 0000000..1a9ea79 --- /dev/null +++ b/test/src/tsc-tma-mtc_relative.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: time displayed as delta +; +; opt:ptdump --time --time-delta --no-tcal +; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0xf012, 0x6) +; @pt p5: mtc(0x2) +; @pt p6: mtc(0x3) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma f012, 6 tsc +0 +;%0p5 mtc 2 tsc +1 +;%0p6 mtc 3 tsc +8 diff --git a/test/src/tsc-tma-mtc_wrap.ptt b/test/src/tsc-tma-mtc_wrap.ptt new file mode 100644 index 0000000..c06301b --- /dev/null +++ b/test/src/tsc-tma-mtc_wrap.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. 
+; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: wrap the CTC counter in MTC +; +; opt:ptdump --time --time-delta --no-tcal +; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 3 --cpuid-0x15.ebx 9 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x1fff, 0x1) +; @pt p5: mtc(0x0) +; @pt p6: mtc(0x1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 1fff, 1 tsc +0 +;%0p5 mtc 0 tsc +2 +;%0p6 mtc 1 tsc +3 diff --git a/test/src/tsc-tma_zero_fc-cbr-cyc.ptt b/test/src/tsc-tma_zero_fc-cbr-cyc.ptt new file mode 100644 index 0000000..da7b215 --- /dev/null +++ b/test/src/tsc-tma_zero_fc-cbr-cyc.ptt @@ -0,0 +1,56 @@ +; Copyright (c) 2016-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC and CYC-based TSC estimation. 
+; +; Variant: CBR-based calibration, +; CYC between TMA and MTC (not shown) +; TMA provides an FC of zero (which triggers CYC adjustment) +; +; opt:ptdump --time --time-delta +; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x102, 0) +; @pt p5: cbr(0x2) +; @pt p6: cyc(0x3) +; @pt p7: cyc(0x1) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 102, 0 tsc +0 +;%0p5 cbr 2 +;%0p6 cyc 3 tsc +6 +;%0p7 cyc 1 tsc +2 diff --git a/test/src/tsc_tma_mtc_gap.ptt b/test/src/tsc_tma_mtc_gap.ptt new file mode 100644 index 0000000..a6f8d24 --- /dev/null +++ b/test/src/tsc_tma_mtc_gap.ptt @@ -0,0 +1,52 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test MTC-based TSC estimation. +; +; Variant: omit some MTC +; +; opt:ptdump --time --time-delta --no-tcal +; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 8 + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: psbend() + +; @pt p3: tsc(0xa0000) +; @pt p4: tma(0x1, 0x4) +; @pt p5: mtc(0x4) +; @pt p6: mtc(0xa) + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 psbend +;%0p3 tsc a0000 tsc +a0000 +;%0p4 tma 1, 4 tsc +0 +;%0p5 mtc 4 tsc +8 +;%0p6 mtc a tsc +18 diff --git a/test/src/tsx-abort.ptt b/test/src/tsx-abort.ptt new file mode 100644 index 0000000..fb12bb4 --- /dev/null +++ b/test/src/tsx-abort.ptt @@ -0,0 +1,75 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. 
+; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TSX aborts are shown correctly. +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop + +; @pt p5: mode.tsx(begin) +; @pt p6: fup(1: %l2) +l2: nop + +; @pt p7: mode.tsx(abort) +; @pt p8: fup(1: %l3) +; @pt p9: tip(1: %l5) +l3: nop +l4: hlt + +l5: nop +; @pt p10: fup(1: %l6) +; @pt p11: tip.pgd(0: %l7) +l6: nop +l7: hlt + +; @pt .exp(ptxed) +;%0l1 # nop +;? %0l2 # nop +;[interrupt] +;[aborted] +;%0l5 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 mode.tsx intx +;%0p6 fup 1: %?l2.2 +;%0p7 mode.tsx abrt +;%0p8 fup 1: %?l3.2 +;%0p9 tip 1: %?l5.2 +;%0p10 fup 1: %?l6.2 +;%0p11 tip.pgd 0: %?l7.0 diff --git a/test/src/tsx-commit.ptt b/test/src/tsx-commit.ptt new file mode 100644 index 0000000..3394fa7 --- /dev/null +++ b/test/src/tsx-commit.ptt @@ -0,0 +1,70 @@ +; Copyright (c) 2013-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TSX commits are shown correctly. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: psbend() +l1: nop + +; @pt p5: mode.tsx(begin) +; @pt p6: fup(1: %l2) +l2: nop + +; @pt p7: mode.tsx(commit) +; @pt p8: fup(1: %l3) +l3: nop + +; @pt p9: fup(1: %l4) +; @pt p10: tip.pgd(0: %l5) +l4: nop +l5: hlt + +; @pt .exp(ptxed) +;%0l1 # nop +;? %0l2 # nop +;[committed] +;%0l3 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 psbend +;%0p5 mode.tsx intx +;%0p6 fup 1: %?l2.2 +;%0p7 mode.tsx +;%0p8 fup 1: %?l3.2 +;%0p9 fup 1: %?l4.2 +;%0p10 tip.pgd 0: %?l5.0 diff --git a/test/src/tsx-no_spurious_commit.ptt b/test/src/tsx-no_spurious_commit.ptt new file mode 100644 index 0000000..ece8245 --- /dev/null +++ b/test/src/tsx-no_spurious_commit.ptt @@ -0,0 +1,71 @@ +; Copyright (c) 2014-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that TSX status updates in adjacent PSB+ do not cause spurious +; commit indications. 
+; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: fup(3: %l1) +; @pt p3: mode.exec(64bit) +; @pt p4: mode.tsx(commit) +; @pt p5: psbend() +l1: nop + +; @pt p6: psb() +; @pt p7: fup(3: %l2) +; @pt p8: mode.exec(64bit) +; @pt p9: mode.tsx(commit) +; @pt p10: psbend() +l2: nop + +; @pt p11: fup(1: %l3) +; @pt p12: tip.pgd(0: %l4) +l3: nop +l4: hlt + +; @pt .exp(ptxed) +;%0l1 # nop +;%0l2 # nop +;[disabled] + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 fup 3: %0l1 +;%0p3 mode.exec cs.l +;%0p4 mode.tsx +;%0p5 psbend +;%0p6 psb +;%0p7 fup 3: %0l2 +;%0p8 mode.exec cs.l +;%0p9 mode.tsx +;%0p10 psbend +;%0p11 fup 1: %?l3.2 +;%0p12 tip.pgd 0: %?l4.0 diff --git a/test/src/vmcs-far_call.ptt b/test/src/vmcs-far_call.ptt new file mode 100644 index 0000000..2e2c0c2 --- /dev/null +++ b/test/src/vmcs-far_call.ptt @@ -0,0 +1,67 @@ +; Copyright (c) 2015-2017, Intel Corporation +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are met: +; +; * Redistributions of source code must retain the above copyright notice, +; this list of conditions and the following disclaimer. +; * Redistributions in binary form must reproduce the above copyright notice, +; this list of conditions and the following disclaimer in the documentation +; and/or other materials provided with the distribution. +; * Neither the name of Intel Corporation nor the names of its contributors +; may be used to endorse or promote products derived from this software +; without specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. + +; Test that VMCS binds to a far branch +; + +org 0x100000 +bits 64 + +; @pt p1: psb() +; @pt p2: mode.exec(64bit) +; @pt p3: fup(3: %l1) +; @pt p4: psbend() +l1: nop + +; @pt p5: vmcs(0xcdcdc000) +; @pt p6: tip(3: %l4) +l2: call far [rax] ; l4 +l3: hlt + +l4: nop + +; @pt p7: fup(1: %l5) +; @pt p8: tip.pgd(0: %l6) +l5: nop +l6: hlt + + +; @pt .exp(ptdump) +;%0p1 psb +;%0p2 mode.exec cs.l +;%0p3 fup 3: %?l1 +;%0p4 psbend +;%0p5 vmcs cdcdc000 vmcs 00000000cdcdc000 +;%0p6 tip 3: %?l4 +;%0p7 fup 1: %?l5.2 +;%0p8 tip.pgd 0: %?l6.0 + + +; @pt .exp(ptxed) +;%0l1 # nop +;%0l2 # call far [rax] # l4 +;%0l4 # nop +;[disabled]
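The "tsc +N" annotations in the ptdump expectations above follow the two estimation rules these tests exercise: with CBR-based calibration, a CYC payload adds cyc * nom-freq / cbr TSC ticks; an MTC adds ctc-delta * (cpuid-0x15.ebx / cpuid-0x15.eax) TSC ticks, with the TMA fast counter subtracted at the first MTC after a TMA. Below is a minimal sketch, not part of the patch, that reproduces two of the expected deltas; the helper names are hypothetical and do not reflect libipt's actual interfaces (its implementation lives in libipt/src/pt_time.c).

/* Illustration only: hypothetical helpers, not libipt's API. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* CBR-based calibration: TSC ticks contributed by one CYC packet. */
static uint64_t cyc_to_tsc(uint64_t cyc, uint8_t nom_freq, uint8_t cbr)
{
	return (cyc * nom_freq) / cbr;
}

/* MTC/TMA-based estimation: TSC ticks for a CTC delta, where CPUID
 * leaf 0x15 gives the TSC:CTC ratio as ebx:eax.
 */
static uint64_t ctc_to_tsc(uint64_t ctc_delta, uint32_t eax, uint32_t ebx)
{
	return (ctc_delta * ebx) / eax;
}

int main(void)
{
	/* tsc-tma-cbr-mtc-cyc.ptt: --nom-freq 4, cbr(0x2), cyc(0x3)
	 * expects "cyc 3 tsc +6".
	 */
	printf("cyc 3 -> tsc +%" PRIx64 "\n", cyc_to_tsc(0x3, 4, 2));

	/* Same test: --mtc-freq 8, --cpuid-0x15.eax 1, --cpuid-0x15.ebx 4,
	 * tma(0x102, 0x8), mtc(0x2) expects "mtc 2 tsc +3f0".  With
	 * mtc-freq 8 the MTC payload carries CTC bits 15:8, so the MTC's
	 * CTC value is 0x200; the TMA fast counter (0x8) is subtracted at
	 * the first MTC after the TMA.
	 */
	printf("mtc 2 -> tsc +%" PRIx64 "\n",
	       ctc_to_tsc(0x200 - 0x102, 1, 4) - 0x8);

	return 0;
}

The same arithmetic accounts for the remaining variants: the *_calibrate tests re-derive the CYC-to-TSC factor from the cycles observed between MTCs, and the tsc-tma-mtc_infreq* tests recover the CTC bits that the 16-bit TMA cannot provide at low MTC frequencies from the next MTC payload.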