Commit 6ebd6e73 authored by Dorel Coman's avatar Dorel Coman Committed by Oliver Horst
Browse files

memguard: removed usage of uxTaskGetSystemState() - moved memguard folder...

memguard: removed usage of uxTaskGetSystemState() - moved memguard folder inside the extra folder of FreeRTOS
parents
cmake_minimum_required(VERSION 3.7 FATAL_ERROR)

# Register the MemGuard implementation sources with the freertos-plus target.
target_sources(
  freertos-plus
  PRIVATE
    "${CMAKE_CURRENT_LIST_DIR}/ewma.c"
    "${CMAKE_CURRENT_LIST_DIR}/memguard.c"
    "${CMAKE_CURRENT_LIST_DIR}/perfmon.c"
)
BSD with Attribution License
Copyright (c) 2015-2020 fortiss GmbH - Research Institute of the
Free State of Bavaria, Guerickestr. 25, 80805 Munich, Germany
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
4. Redistributions of any form whatsoever must retain the following
acknowledgment: 'This product includes software developed by the
fortiss GmbH -- Research Institute of the Free State of Bavaria,
Munich, Germany (https://www.fortiss.org/).'
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SPDX-License-Identifier: BSD-3-Clause-Attribution
Added platform_info and ipi files from Johannes; to be ignored, as they are only there temporarily for now
TO BUILD, execute in _build folder:
cmake -DCMAKE_TOOLCHAIN_FILE=../toolchain-aarch64.cmake -DCMAKE_INSTALL_PREFIX=../_install -DPLATFORM=zcu102-zynqmp -DXME_TARGET_IDENTIFIER=freertos-arm -DXME_NODE_BUILD_LIBRARY=True -DCMAKE_BUILD_TYPE=Debug ..
to make:
make memguard
to trace prints:
picocom -b 115200 /dev/ttyUSB0
######################################################################
-------------------------
assign seq: 1.1084
assign rand: 5.5176
read seq: 1.1274
read rand: 17.17666
copy seq: 0.762
copy rand: 10.10653
scale seq: 0.890
scale rand: 10.10767
sum: 2
triad: 3
random: 18
random: 47
-------------------
sequential
cycles: 11076475790 instruct: 9515957425 bus accesses: 595519159
IPC: 0.859 B/W 227 MB/s
seq read: cycles: 186589290 instruct: 156000109 bus accesses: 6064856
rand read: cycles: 2494631596 instruct: 396000108 bus accesses: 47459616
copy read: cycles: 219554436 instruct: 192000108 bus accesses: 12000038
triad read: cycles: 444317215 instruct: 276000108 bus accesses: 23970548
BUS ACCESS = 16 byte = 128 bit
CACHE LINE SIZE = 64 byte = 512 bit -> 4 ACCESSES
cache enabled
seq assign: cycles: 156093830 instruct: 156000109 bus accesses: 6.000.136 = 0,5 access for 1 elements of 64 bit = 64 bit
rand assign: cycles: 763440134 instruct: 396000108 bus accesses: 94.917.400 = 8 accesses for 1 element of 64 bit = 1024 bit = 2 * cache line
seq read: cycles: 183435041 instruct: 156000108 bus accesses: 6.066.048 = 0.5 access for 1 element of 64 bit = 64 bit
rand read: cycles: 2496584417 instruct: 396000108 bus accesses: 47.459.136 = 4 access for 1 element = 512 bit
copy read: cycles: 219558344 instruct: 192000108 bus accesses: 12000048 = 0,5 access for 1 elem of 64 bit
sum read: cycles: 396509540 instruct: 252000108 bus accesses: 23.948.450 = 2 accesses per operation = (2 read and 1 write which takes all access) * 12.000.000
triad read: cycles: 444454433 instruct: 276000108 bus accesses: 23.987.652
-------------------------------------------------------------------
1 cycles: 10800182099 instruct: 10790997547 bus accesses: 2689725
IPC: 0.999 B/W 4 MB/s
for (int i = 0; i < ARRAY_SIZE / 100; ++i) {
b[i] = CONST;
}
----------------------------------------------------------------
1 cycles: 10800182138 instruct: 10254462773 bus accesses: 17773853
IPC: 0.949 B/W 27 MB/s
int val = 0;
for (int i = 0; i < ARRAY_SIZE / 90; ++i) {
b[i] = CONST;
val++;
val += CONST;
}
1 cycles: 10800182138 instruct: 10254462773 bus accesses: 17773853
IPC: 0.949 B/W 27 MB/s
for (int i = 0; i < ARRAY_SIZE / 86; ++i) {
b[i] = CONST;
}
-------------------------------------------------------------------
1 cycles: 10800181929 instruct: 10791527950 bus accesses: 86328115
IPC: 0.999 B/W 131 MB/s
for (int i = 0; i < ARRAY_SIZE / 82; ++i) {
b[i] = CONST;
}
---------------------------------------------------------------------
1 cycles: 10800181480 instruct: 10791750159 bus accesses: 113526349
IPC: 0.999 B/W 173 MB/s
for (int i = 0; i < ARRAY_SIZE / 75; ++i) {
b[i] = CONST;
}
----------------------------------------------------------
RANDOM READ
cycles: 10800183199 instruct: 1711978884 bus accesses: 205247457
IPC: 0.158 B/W 313 MB/s
-------------------------------------------------------------
1 cycles: 10800181890 instruct: 10792370216 bus accesses: 215097351
IPC: 0.999 B/W 328 MB/s
for (int i = 0; i < ARRAY_SIZE / 50; ++i) {
b[i] = CONST;
}
--------------------------------------------------------
3 cycles: 10800194615 instruct: 2340669763 bus accesses: 236554557
IPC: 0.216 B/W 360 MB/s
for (int i = 0; i < ARRAY_SIZE / 2; ++i) {
b[i] = c[i];
}
for (int i = ARRAY_SIZE / 2; i < ARRAY_SIZE; ++i) {
val = a[rand() % ARRAY_SIZE];
}
------------------------------------------------------------
3 cycles: 10800194752 instruct: 2766257282 bus accesses: 249639391
IPC: 0.256 B/W 380 MB/s
for (int i = 0; i < ARRAY_SIZE / 2; ++i) {
b[i] = c[i];
}
for (int i = ARRAY_SIZE * 0.3; i < ARRAY_SIZE *0.6; ++i) {
val = a[rand() % ARRAY_SIZE];
}
-----------------------------------------------------------
3 cycles: 10800194177 instruct: 3158642595 bus accesses: 269736891
IPC: 0.292 B/W 411 MB/s
for (int i = 0; i < ARRAY_SIZE / 2; ++i) {
b[i] = c[i];
}
for (int i = ARRAY_SIZE * 0.3; i < ARRAY_SIZE *0.5; ++i) {
val = a[rand() % ARRAY_SIZE];
}
-----------------------------------------------------------------
3 cycles: 10800195126 instruct: 4092857821 bus accesses: 317393292
IPC: 0.378 B/W 484 MB/s
for (int i = 0; i < ARRAY_SIZE / 2; ++i) {
b[i] = c[i];
}
for (int i = ARRAY_SIZE * 0.3; i < ARRAY_SIZE *0.4; ++i) {
val = a[rand() % ARRAY_SIZE];
}
------------------------------------------------------------
1 cycles: 10800182102 instruct: 1994709488 bus accesses: 382401729
IPC: 0.184 B/W 583 MB/s
for (int i = 0; i < ARRAY_SIZE / 10; ++i) {
b[rand() % ARRAY_SIZE] = CONST;
val = a[rand() % ARRAY_SIZE];
val = a[rand() % ARRAY_SIZE];
}
------------------------------------------------------------
SEQ ASSIGN
cycles: 10800182144 instruct: 10793685656 bus accesses: 414634794
IPC: 0.999 B/W 632 MB/s
-------------------------------------------------------------
50% random assign + 50% random read = 798MB/s (first part assigns, second part reads)
for (int i = 0; i < ARRAY_SIZE; ++i) { //also divided by 10 works same speed
b[rand() % ARRAY_SIZE] = CONST;
val = a[rand() % ARRAY_SIZE];
}
----------------------------------------------------------------
COPY
cycles: 10800183990 instruct: 9218344280 bus accesses: 576169349
IPC: 0.853 B/W 879 MB/s
TRIAD?
cycles: 10800185540 instruct: 6707792436 bus accesses: 583216920
IPC: 0.621 B/W 889 MB/s
------------------------------------------------------------------
2 cycles: 10800194674 instruct: 3606999423 bus accesses: 713219047
IPC: 0.333 B/W 1088 MB/s
for (int i = 0; i < ARRAY_SIZE * 0.70; ++i) {
b[rand() % ARRAY_SIZE] = CONST;
}
for (int i = ARRAY_SIZE * 0.70; i < ARRAY_SIZE; ++i) {
val = a[rand() % ARRAY_SIZE];
}
----------------------------------------------------------------
1 cycles: 10800182159 instruct: 3173242376 bus accesses: 759481229
IPC: 0.293 B/W 1158 MB/s
for (int i = 0; i < ARRAY_SIZE; ++i) {
b[rand() % ARRAY_SIZE] = CONST;
b[rand() % ARRAY_SIZE] = CONST;
val = a[rand() % ARRAY_SIZE];
}
------------------------------------------------------------------
2 cycles: 10800194939 instruct: 7278484124 bus accesses: 1016331698
IPC: 0.673 B/W 1550 MB/s
for (int i = 0; i < ARRAY_SIZE / 3; ++i) {
b[i] = c[i];
}
for (int i = 0; i < ARRAY_SIZE / 8; ++i) {
a[rand() % ARRAY_SIZE] = CONST;
}
------------------------------------------------------------------
2 cycles: 10800194698 instruct: 7009282930 bus accesses: 1067706947
IPC: 0.648 B/W 1629 MB/s
for (int i = 0; i < ARRAY_SIZE / 3; ++i) {
b[i] = c[i];
}
for (int i = 0; i < ARRAY_SIZE / 6; ++i) {
a[rand() % ARRAY_SIZE] = CONST;
}
------------------------------------------------------------------
2 cycles: 10800195106 instruct: 6457498135 bus accesses: 1173589644
IPC: 0.597 B/W 1790 MB/s
for (int i = 0; i < ARRAY_SIZE / 4; ++i) {
b[i] = c[i];
}
for (int i = 0; i < ARRAY_SIZE / 4; ++i) {
a[rand() % ARRAY_SIZE] = CONST;
}
---------------------------------------------------------------
Random assign
cycles: 10800183566 instruct: 5625831461 bus accesses: 1348495001
IPC: 0.520 B/W 2057 MB/s
for (int j = 0; j < ARRAY_SIZE; ++j) {
b[rand() % ARRAY_SIZE] = CONST;
}
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
b[i] = CONST;
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("seq assign: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
b[rand() % ARRAY_SIZE] = CONST;
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("rand assign: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
val = a[i];
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("seq read: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
val = a[rand() % ARRAY_SIZE];
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("rand read: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
c[i] = b[i];
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("copy read: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
c[i] = a[i] + b[i];
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("sum read: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
pmu_write_counter(BENCH_1_CNTR_ID_BUS_ACCESS, 0);
pmu_write_counter(BENCH_2_CNTR_ID, 0);
pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
for (int i = 0; i < ARRAY_SIZE; ++i) {
c[i] = a[i] + CONST*b[i];
}
cycle_count = pmu_read_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES);
bus_accesses = pmu_read_counter(BENCH_1_CNTR_ID_BUS_ACCESS);
instructions = pmu_read_counter(BENCH_2_CNTR_ID);
printf("triad read: cycles: %llu instruct: %llu bus accesses: %llu\n\r", cycle_count, instructions, bus_accesses);
--------------------------------------------------------------
\ No newline at end of file
/**
* Created by Dorel Coman on 30.04.18.
*
* These benchmarks are meant to be used for testing the MemGuard framework built on top of FreeRTOS
*/
#include "freertos/FreeRTOS.h"
#include "freertos/core/task.h"
#include "memguard/benchmark.h"
#include "memguard/perfmon.h"
#include "xil/xil_printf.h"
#include <xil/drivers/xttcps.h>
#include <xil/drivers/xscugic.h>
#include <freertos/FreeRTOSConfig.h>
#include <xil/xparameters.h>
#include <stdlib.h>
#include "xil/xil_cache.h"
#include "memguard/memguard.h"
/* Reads the ARMv8 generic timer physical count register (CNTPCT_EL0). */
uint64_t read_cycle_counter(void)
{
	uint64_t counter_value;

	asm volatile ("mrs %0, CNTPCT_EL0" : "=r" (counter_value));

	return counter_value;
}
/* Route printf through the lightweight Xilinx console implementation. */
#define printf xil_printf
/* 1 = running under QEMU: PMU counter setup in init_benchmark() is skipped. */
#define QEMU 1
/* Core that owns the benchmark timer and drives the measurement windows. */
#define MASTER_CORE_ID 0U
#define MB_IN_BYTE (1024 * 1024)
/* Bytes transferred by one bus access (used for bandwidth computation). */
#define SIZE_MEM_READ 16
/* TTC timer instance/interrupt used by the master core for the 10 s window. */
#define TTC_TIMER_CORE_0_DEVICE_ID XPAR_XTTCPS_2_DEVICE_ID
#define TTC_TIMER_CORE_0_INTR_ID XPAR_XTTCPS_2_INTR
/* Priority of the ICI for the XScuGic_SetPriorityTriggerType() */
#define ICI_INT_PRIORITY 232
#define BENCHMARK_ICI_INT_ID 2
/* Timer tick rate; a window lasts TIMES_TO_COUNT ticks = 10 seconds. */
#define TIMER_HZ 4
#define TIMES_TO_COUNT (10 * TIMER_HZ)
/* Number of entries in bench_arr below. */
#define NUMBER_OF_BENCHMARKS 17
/* Element type of the three benchmark arrays a/b/c. */
#define ARRAY_TYPE double
#define ARRAY_SIZE 12000000 // 12 Million elements = 96 MB space * 3 arrays = 288 MB total
/* Constant value assigned/scaled in the benchmark loops. */
#define CONST 5
/* The three large arrays the benchmark kernels read and write. */
ARRAY_TYPE a[ARRAY_SIZE];
ARRAY_TYPE b[ARRAY_SIZE];
ARRAY_TYPE c[ARRAY_SIZE];
/* Array used for tracing the usage of bandwidth in each window*/
#define TIME_FRAMES_IN_ARRAY 80000
uint32_t trace_arr[TIME_FRAMES_IN_ARRAY];
/* Used for stopping the while loop when the timer has counted 10 seconds */
static volatile uint32_t not_stop_timer;
/* Used for counting how many fractions of time have occurred already and to stop the timer when it reaches 10 seconds*/
volatile uint32_t ticks;
/* It counts the cycles occurred during a MemGuard time window */
volatile uint64_t window_cycle_count;
/* It counts the number of accesses to the main memory during 10 seconds */
volatile uint64_t bus_accesses;
/* It counts the number of instructions executed by the task while running during the benchmark. The count starts only
from the moment the task is scheduled and ends when the task is descheduled or there is a context switch*/
volatile uint64_t instructions;
/* It keeps track of how many reset window routines are occurring during the execution of a benchmark */
volatile uint32_t window_count;
/* stores the core id */
uint32_t core_id;
/* We are counting the amount of cycles used by memguard during the executions of its functions,
interrupt handlers and task routines */
uint64_t memguard_cycle_usage;
/* Interrupt controller instance shared with the rest of the application. */
extern XScuGic xInterruptController;
/* Instance of the Interrupt Controller */
static XScuGic *interrupt_controller;
/* Instance of the Timer */
static XTtcPs benchmark_timer;
/* Forward declarations for the timer ISR and per-core setup helpers. */
void handler_timer_interrupt(void *callback_ref);
void benchmark_timer_setup();
void benchmark_slave_setup();
/**
 * Benchmark kernels. The naming scheme is _<index>_<bandwidth>_bench, where
 * the middle number is the approximate memory bandwidth (MB/s) the kernel
 * was measured to generate; they are listed in increasing bandwidth order.
 */
void _1_3_bench();
void _2_63_bench();
void _3_122_bench();
void _4_234_bench();
void _5_326_bench();
void _6_415_bench();
void _7_446_bench();
void _8_531_bench();
void _9_618_bench();
void _10_700_bench();
void _11_886_bench();
void _12_1101_bench();
void _13_1206_bench();
void _14_1550_bench();
void _15_1648_bench();
void _16_1866_bench();
void _17_2029_bench();
/* array of function used for accessing in sequence all the benchmark functions */
void (*bench_arr[])() = { _1_3_bench, _2_63_bench, _3_122_bench, _4_234_bench, _5_326_bench, _6_415_bench, _7_446_bench, _8_531_bench,
_9_618_bench, _10_700_bench, _11_886_bench, _12_1101_bench, _13_1206_bench, _14_1550_bench, _15_1648_bench,
_16_1866_bench, _17_2029_bench };
/**
 * One-time benchmark initialisation for the calling core.
 *
 * The master core configures the TTC timer that bounds each measurement
 * window; every other core runs the slave setup instead. On real hardware
 * (QEMU == 0) the PMU counters used for the measurements are also bound to
 * their events here.
 */
void init_benchmark()
{
core_id = pmu_get_core_id();
interrupt_controller = &xInterruptController;
/* setting up the benchmarks depending on the core */
if(core_id == MASTER_CORE_ID)
benchmark_timer_setup();
else
benchmark_slave_setup();
#if(QEMU == 0)
{
/* PMU counters are already enabled by MemGuard therefore we don't need to enable them again */
pmu_enable_counter_for_event(BENCH_1_CNTR_ID_BUS_ACCESS, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
pmu_enable_counter_for_event(BENCH_2_CNTR_ID_INSTR_EX, ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED);
pmu_enable_counter_for_event(BENCH_3_CNTR_ID_CLOCK_CYCLES, ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES);
/* This counter is used by the tracer to count how many cycles are taken by MemGuard during its routines*/
pmu_enable_counter_for_event(BENCH_4_CNTR_ID_CLOCK_CYCLES_OVER, ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES);
}
#endif
}
/**
 * Runs every kernel in bench_arr for one 10 second measurement window and
 * prints per-benchmark statistics: bus accesses, IPC, bandwidth and the
 * MemGuard overhead observed during the window.
 *
 * The master core starts the TTC timer whose interrupt handler clears
 * not_stop_timer when the window ends; slave cores simply spin until then.
 */
void start_benchmarks()
{
	/* Seed the PRNG used to generate the random array indexes. */
	srand(pmu_read_cyclecount());

	printf("\n\rstart benchmark\n\r");

	/* Iterating over all benchmarks and measuring each of them for 10s */
	for (int i = 0; i < NUMBER_OF_BENCHMARKS; i++) {
		/* Touch every element so the arrays are populated before measuring. */
		for (int j = 0; j < ARRAY_SIZE; ++j) {
			a[j] = CONST;
			b[j] = CONST;
			c[j] = CONST;
		}
		for (int j = 0; j < TIME_FRAMES_IN_ARRAY; j++)
			trace_arr[j] = 0;

		/* Cleared by the timer interrupt once 10 seconds have elapsed. */
		not_stop_timer = 1;

		/* Resetting all the variables which keep count of the occurring events. */
		ticks = 0;
		window_cycle_count = 0;
		bus_accesses = 0;
		instructions = 0;
		memguard_cycle_usage = 0;
		window_count = 0;

#if(QEMU == 0)
		{
			pmu_write_counter(BENCH_3_CNTR_ID_CLOCK_CYCLES, 0);
		}
#endif

		if (core_id == MASTER_CORE_ID) {
			XTtcPs_ResetCounterValue(&benchmark_timer);
			XTtcPs_Start(&benchmark_timer);
			while (not_stop_timer) {
				(*bench_arr[i])();
			}
		} else {
			while (not_stop_timer) {
				/* Slave cores idle for the window; a kernel could be run
				   here to generate interference. */
			}
		}

		if (core_id == MASTER_CORE_ID) {
			/* Short busy-wait so the slave cores can finish their output
			   before the master prints its results. */
			int val = 0;
			for (int j = 0; j < 1000000; j++)
				val = j;
			(void)val; /* silence unused-value warning */
		}

		/* Avoid a 0.0/0.0 NaN when no cycles were counted (e.g. PMU
		   counters unavailable under QEMU). */
		double ipc = (window_cycle_count != 0)
				? (double)instructions / (double)window_cycle_count
				: 0.0;
		uint32_t ipc_whole = (uint32_t) ipc;
		uint32_t ipc_decimal = (uint32_t) ((ipc - ipc_whole) * 1000);

		/* One bus access moves SIZE_MEM_READ bytes; the window is 10 s. */
		uint64_t bandwidth = bus_accesses / 10 * SIZE_MEM_READ / MB_IN_BYTE;

		/* BUG FIX: bandwidth is uint64_t and was printed with %u, which is
		   undefined behaviour in a varargs call; print it with %llu. */
		printf("\n\rID: %u bench: %d bus accesses: %llu IPC: %u.%u B/W %llu MB/s\n\r",
				core_id, (i + 1), bus_accesses, ipc_whole, ipc_decimal, bandwidth);

		double overhead = (window_cycle_count != 0)
				? (double)memguard_cycle_usage / (double)window_cycle_count
				: 0.0;
		uint32_t overhead_whole = (uint32_t) overhead;
		uint32_t overhead_decimal = (uint32_t) ((overhead - overhead_whole) * 1000);

		printf("Tot us: %llu - memg us: %llu Overhead: %u.%u windows: %u \n\r",
				window_cycle_count, memguard_cycle_usage, overhead_whole,
				overhead_decimal, window_count);
	}
}
/****************************
* Tracing functions
****************************/