Commit 0d334ba1 authored by Dorel Coman's avatar Dorel Coman Committed by Oliver Horst
Browse files

memguard: in lab last review and fixes

parent 13acca65
......@@ -181,7 +181,6 @@ static void memguard_init_task(void *arg);
static int setup_inter_core_interrupts();
static int setup_pmu_overflow_interrupt();
static void memguard_timer_setup();
static void check_if_cores_are_ready();
static void periodic_timer_handler_master(void *callback_ref);
static void periodic_timer_handler_slave(void *callback_ref);
......@@ -253,7 +252,8 @@ void memguard_init()
Xil_DCacheDisable();
/* The global variables are initialized by the master core */
/* The global variables are initialized by the master core before the slave
cores are booted */
if(core_id == MASTER_CORE_ID){
atomic_init(&(memguard_info.cores_ready), 0);
atomic_init(&(memguard_info.global_budget), 0);
......@@ -263,10 +263,7 @@ void memguard_init()
Xil_AssertSetCallback(assert_print);
// pmu_init_counters();
//TODO: TAKE OUT
//pmu_enable_counter_for_event(MEMGUARD_CNTR_ID,
// ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
/* The task activated at the beginning of scheduling to setup the timer and
then deleted */
......@@ -380,7 +377,13 @@ BaseType_t memguard_can_task_resume(TaskHandle_t xTask)
can_task_resume = !task_info->tsk_susp_by_memguard;
} else {
can_task_resume = pdFALSE;
if (( xTask == xTaskGetIdleTaskHandle() ) ||
( xTask == xTimerGetTimerDaemonTaskHandle() ))
{
can_task_resume = pdTRUE;
} else {
can_task_resume = pdFALSE;
}
}
#if (INCLUDE_memguard_benchmark)
......@@ -547,17 +550,50 @@ static void memguard_init_task(void *arg)
int status;
int_controller_ptr = &xInterruptController;
/* */
pmu_init_counters();
pmu_enable_counter_for_event(MEMGUARD_CNTR_ID,
ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
/* */
status = setup_inter_core_interrupts();
configASSERT(status == XST_SUCCESS);
/* */
status = setup_pmu_overflow_interrupt();
configASSERT(status == XST_SUCCESS);
/* */
if(XScuGic_GetCpuID() == MASTER_CORE_ID){
memguard_timer_setup();
}
check_if_cores_are_ready();
/**
* The last core to finish MemGuard initialization issues the first MemGuard
* ICI interrupt to the master core, to initiate the first window. The shared
* variable "cores_ready" is used for this synchronization purpose.
*/
atomic_uint number_of_cores_ready;
atomic_init(&number_of_cores_ready, 0);
atomic_fetch_add(&(memguard_info.cores_ready), 1);
number_of_cores_ready = atomic_load(&(memguard_info.cores_ready));
/* Checking how many cores are already initialized */
if(number_of_cores_ready == NUMBER_CORES){
/* The scheduler is suspended in order to wait for memguard to be ready
on all cores calling the master core to start the framework because
everybody is ready setting the value of cores_ready to 0 so we are
sure that only one core is triggering the master to start */
status = XScuGic_SoftwareIntr(int_controller_ptr,
MASTER_MEMGUARD_ICI_INT_ID, MASTER_CORE_ICI_MASK);
configASSERT(status == XST_SUCCESS);
}
/* Delete MemGuard initialization task, as its job is done and it is not used anymore. */
vTaskDelete(NULL);
}
......@@ -638,10 +674,10 @@ static int setup_pmu_overflow_interrupt()
}
/* Setting the priority of the PMU interrupts to low value, in order to
make the PMU interrupts preemtable by the timer or ICI interrupts.
This precaution is taken in order to avoid overflow interrupts while the
window reset is already started. This situation would create problems in the
MemGuard workflow.
make the PMU interrupts preemptible by the MemGuard timer or MemGuard ICI
interrupts. This precaution is taken to prevent PMU overflow interrupts
from interrupting an ongoing window reset initiated by a MemGuard timer or ICI
interrupt. This situation would create problems in the MemGuard workflow.
Trigger type: 0b01 => Active HIGH level sensitive */
XScuGic_SetPriorityTriggerType(int_controller_ptr, PMU_INT_ID,
(u8)PMU_OVERFLOW_INT_PRIORITY, 0b01);
......@@ -653,41 +689,11 @@ static int setup_pmu_overflow_interrupt()
return XST_SUCCESS;
}
/**
* This function checks if all the cores are ready to start the framework,
* meaning they already started their scheduler and reached this point. The
* last core to reach this function will trigger the start of the first
* window. The shared variable "cores_ready" is used for this synchronisation
* purpose.
*/
static void check_if_cores_are_ready()
{
int status;
atomic_uint number_of_cores_ready;
atomic_init(&number_of_cores_ready, 0);
atomic_fetch_add(&(memguard_info.cores_ready), 1);
number_of_cores_ready = atomic_load(&(memguard_info.cores_ready));
/* checking how many cores are ready to start */
if(number_of_cores_ready == NUMBER_CORES){
/* The scheduler is suspended in order to wait for memguard to be ready
on all cores calling the master core to start the framework because
everybody is ready setting the value of cores_ready to 0 so we are
sure that only one core is triggering the master to start */
status = XScuGic_SoftwareIntr(int_controller_ptr,
MASTER_MEMGUARD_ICI_INT_ID, MASTER_CORE_ICI_MASK);
configASSERT(status == XST_SUCCESS);
}
}
/**
* This function sets up the window timer. It is supposed to be executed
* inside a Task context in order to execute properly. We are using an
* independent timer which allows more fine grained observation of the memory
* bandwidth (eg. 1ms time window) and allows a window to span between 2
* bandwidth (e.g., 1ms time window) and allows a window to span between 2
* different ticks. For more information refer to the original paper at the
* Subsection 6.2.
*/
......@@ -868,6 +874,7 @@ static void memguard_reset_window_routine(void *pvParameter1,
* This functions handles the PMU overflow interrupt and it's called by the
* GIC interrupt when a counter overflows.
*
* The cases have to be executed in their current order; never re-arrange them.
* @param callback_ref not used
*/
static void memguard_overflow_interrupt_handler(void *callback_ref)
......@@ -881,80 +888,99 @@ static void memguard_overflow_interrupt_handler(void *callback_ref)
It may happen that another counter has overflowed, triggering the
interrupt handler, but our handler is not responsible for that counter. */
if(!pmu_counter_has_overflowed(MEMGUARD_CNTR_ID)){
#if (INCLUDE_memguard_benchmark == 1)
stop_memguard_trace();
#endif
return;
goto stop_trace;
}
pmu_clear_interrupt(MEMGUARD_CNTR_ID);
TaskHandle_t curr_task = xTaskGetCurrentTaskHandle();
memguard_task_info_t *task_info = (memguard_task_info_t *)
pvTaskGetMemguardTaskInfo(curr_task);
/* In case an overflow occurs while the TimerTask, IdleTask or MemGuard
are executing we return. We are not monitoring them. No other task
will reach this point if its task_info is NULL. */
if(task_info == NULL)
return;
/* Adding the dynamic assigned quota, which was all used, to the
currently used quota to keep track of total quota used by the task in
the current window */
task_info->curr_used_budget += task_info->cur_ass_budget;
/* CASE 1:
* An overflow occurred while the TimerTask, IdleTask or MemGuard
was executing; hence, we simply return, as we are not monitoring
them. No other task will reach this point if its task_info is NULL.
*/
if(task_info == NULL){
goto stop_trace;
}
/* If there is still global_budget we can try to take a part of it and
assign it to the task */
/* CASE 2:
* Check whether there is still globally shared budget available (managed in
* variable global_budget). If yes, we can try to allocate parts of it and
* assign it to the current task.
*/
uint32_t reclaimed = reclaim_budget(task_info);
if(reclaimed > 0){
task_info->counter_val = mask_value(reclaimed);
/* We were able to reclaim additional budget and assign it to the
* current task.
*/
task_info->counter_val = mask_value(reclaimed);
task_info->cur_ass_budget = reclaimed;
/* There will be no context switch where the counter would be
written, therefore we have to write it here */
pmu_write_counter(MEMGUARD_CNTR_ID, task_info->counter_val);
#if (INCLUDE_memguard_benchmark == 1)
{
stop_memguard_trace();
}
#endif
return;
goto stop_trace;
}
/* We donated some budget at the beginning of the window and we already
finished the assigned bandwidth. The problem is that the donated budget
was already used by other tasks and we cannot claim more bandwidth which
should be of our task. We allow the task to continue, expecting it to use
some bandwidth until the end of the window. */
/* CASE 3:
* Even though our current task was interrupted by MemGuard, because we
* exceeded our assigned budget, we still might be below our statically
* assigned budget. This might happen, because we donated some budget
* at the beginning of the window, but we already finished the dynamically
* assigned bandwidth. As we were not able to reclaim additional bandwidth,
* we face the issue that our donated budget was already used by other
* tasks. The heuristic presented by the original paper suggests to let the
task continue its execution anyhow. This might give the current task
* the chance to still use its full statically assigned bandwidth. At the
* beginning of the next window the prediction for the dynamic memory
* budgeting will be adjusted accordingly.
*/
if (task_info->curr_used_budget < task_info->ass_budget){
/* The task is free to try to use some bandwidth until the end of
the window; we are collecting info of how much bandwidth it is
using to know how much to allocate it at the beginning of the
next window. Hence, we reset the PMU counter to zero for the
overflowed task. */
task_info->cur_ass_budget = 0;
task_info->budget_finished = 1;
task_info->counter_val = 0;
pmu_write_counter(MEMGUARD_CNTR_ID, task_info->counter_val);
#if (INCLUDE_memguard_benchmark == 1)
stop_memguard_trace();
#endif
/* There will be no context switch where the counter would be
written, therefore we have to write it here */
pmu_write_counter(MEMGUARD_CNTR_ID, task_info->counter_val);
/* The task will be free to try to use some bandwidth until the end of
the window; we are collecting info of how much bandwidth it is
using to know how much to allocate it at the beginning of the
next window */
return;
goto stop_trace;
}
/* Incrementing the budget_used_in_window with the budget used by the
current task */
atomic_fetch_add(&(memguard_info.budget_used_in_window),
task_info->curr_used_budget);
atomic_uint sum_of_quota_used = atomic_load(&(memguard_info.budget_used_in_window));
/* CASE 4:
* In case proportional sharing is enabled, we check whether the bandwidth
* consumed by all tasks on all cores during this window already exceeds
* the guaranteed minimal bandwidth. If yes, we issue a window reset to start
* freshly with a new budget share for all tasks/cores. We reach this situation
* if the tasks from all cores have finished their budget.
*/
#if( PROPORTIONAL_SHARE == 1)
{
/* Incrementing the budget_used_in_window with the budget used by the
* current task
*/
atomic_fetch_add(&(memguard_info.budget_used_in_window),
task_info->curr_used_budget);
atomic_uint sum_of_quota_used = atomic_load(&(memguard_info.budget_used_in_window));
/* If total budget used exceeds the minimum guaranteed bandwidth then
a new time window has to be started. We reach this situation if the
tasks from all the cores have finished their budget.*/
a new time window has to be started. */
if (sum_of_quota_used >= MIN_GUARANTEED_BUDGET){
int status;
......@@ -963,20 +989,24 @@ static void memguard_overflow_interrupt_handler(void *callback_ref)
MASTER_MEMGUARD_ICI_INT_ID, MASTER_CORE_ICI_MASK);
configASSERT(status == XST_SUCCESS);
#if (INCLUDE_memguard_benchmark)
stop_memguard_trace();
#endif
return;
goto stop_trace;
}
}
#endif
/* Suspend the task because it has used all its budget */
/* CASE 5:
* The current task exceeded its currently assigned memory bandwidth, failed
* on reclaiming additional budget, even consumed all of its statically
* assigned budget. Thus, we have to suspend the task, as it has used all its
* budget.
*/
xTimerPendFunctionCallFromISR(suspend_task_routine, curr_task, 0, NULL);
portYIELD_FROM_ISR(pdTRUE);
#if (INCLUDE_memguard_benchmark == 1)
stop_trace:
#if (INCLUDE_memguard_benchmark == 1)
stop_memguard_trace();
#endif
}
......@@ -994,6 +1024,12 @@ uint32_t reclaim_budget(memguard_task_info_t *task_info)
atomic_uint new_global_budget;
atomic_init(&new_global_budget, 0);
/* Adding the dynamic assigned quota, which was all used, to the
currently used quota to keep track of total quota used by the task in
the current window */
task_info->curr_used_budget += task_info->cur_ass_budget;
/* If the task has already used more than its ass_budget we are
assigning to it a small value called MIN_ASSIGNED_BUDGET of value
equivalent to 25 MB/s */
......@@ -1032,6 +1068,7 @@ uint32_t reclaim_budget(memguard_task_info_t *task_info)
trace_reclaim_exec(reclaimed);
}
#endif
break;
}
}
......
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2017 Xilinx, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of Mentor Graphics Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**************************************************************************
* FILE NAME
*
* platform_info.c
*
* DESCRIPTION
*
* This file define platform specific data and implements APIs to set
* platform specific information for OpenAMP.
*
* This platform info is used to implement APU to APU remoteproc and
* rpmsg.
*
**************************************************************************/
#include "openamp/hil.h"
#include "metal/atomic.h"
#include "platform_info.h"
#include "freertos/FreeRTOSConfig.h"
/* IPI base address */
#define IPI_BASE_ADDR XPAR_XIPIPSU_0_BASE_ADDRESS
/* IPI channel bit mask for IPI from/to APU */
#define IPI_CHN_BITMASK 0x01000000
/* APU remote CPU index. We only talk to one CPU in the example.
 * NOTE(review): the original comment said "we set the CPU index to 0",
 * but the value here is 1 — confirm which is intended. */
#define APU_CPU_ID 1
/* Base address and size of the shared memory region used for IPC. */
#define SHM_BASE_ADDR 0xFFFC0100
#define SHM_SIZE 0x10000
/* IPI information used by remoteproc operations.
 */
struct ipi_info {
	const char *name;           /* IPI device name */
	const char *bus_name;       /* IPI bus name */
	/* NOTE(review): "struct meta_device" looks like a typo for
	 * "struct metal_device". It compiles either way because the type is
	 * only used through a pointer here — confirm against the code that
	 * fills this field before changing it. */
	struct meta_device *dev;    /* IPI metal device */
	struct metal_io_region *io; /* IPI metal IO region */
	metal_phys_addr_t paddr;    /* IPI registers base address */
	uint32_t ipi_chn_mask;      /* IPI channel mask */
	int registered;             /* used internally by RPU to APU remoteproc to mark
	                             * if the IPI interrupt has been registered */
	atomic_int sync;            /* used internally by RPU to APU remoteproc to mark
	                             * if there is kick from the remote */
};
/* processor operations for hil_proc from a53 to a53. It defines
* notification operations and remote processor management operations. */
extern struct hil_platform_ops zynqmp_a53_a53_proc_ops;
/* IPI information definition. It is used in the RPU to APU remoteproc
 * operations. The fields name, bus_name, dev and io are NULL because they
 * are set by remoteproc operations internally later. */
static struct ipi_info chn_ipi_info[] = {
	/* name, bus_name, dev, io, paddr, ipi_chn_mask, registered, sync */
	{NULL, NULL, NULL, NULL, IPI_BASE_ADDR, IPI_CHN_BITMASK, 0, 0},
};
/* MASTER_CORE is 1 when this translation unit is built for the master core
 * (the build defines MASTER_CORE_TRUE), 0 otherwise. */
#ifdef MASTER_CORE_TRUE
#define MASTER_CORE 1
#else
#define MASTER_CORE 0
#endif

#if (MASTER_CORE == 1)
/* Master-core build: the "rpc" remote firmware image is linked into this
 * binary; its start/end addresses come from linker-provided symbols. */
const struct firmware_info fw_table[] =
{
	{"rpc",
	 (unsigned long)(void *)&_binary_amp_remote_elf_start,
	 (unsigned long)(void *)&_binary_amp_remote_elf_end},
};
#else
/* Remote-core build: no embedded firmware image, so addresses are zero. */
const struct firmware_info fw_table[] =
{
	{"rpc", 0, 0},
};
#endif

/* Number of entries in fw_table. */
const int fw_table_size = sizeof(fw_table)/sizeof(struct firmware_info);
/* Physical base address of the shared memory region exposed to the peer. */
const metal_phys_addr_t shm_phy = SHM_BASE_ADDR;

/* libmetal device describing the shared-memory ("shm") region; registered
 * with libmetal and attached to the hil_proc in platform_create_proc(). */
struct metal_device shm_dev = {
	.name = "shm",
	.bus = NULL,
	.num_regions = 1,
	.regions = {
		{
			.virt = (void *) SHM_BASE_ADDR,
			.physmap = &shm_phy,
			.size = SHM_SIZE,
			/* (-1UL) page shift/mask: presumably means the region
			 * is treated as a single flat page (no paging) —
			 * confirm against libmetal's metal_io conventions. */
			.page_shift = (-1UL),
			.page_mask = (-1UL),
			.mem_flags = 0UL,
			.ops = {NULL},
		}
	},
	.node = {NULL},
	.irq_num = 0,
	.irq_info = NULL,
};
/**
 * platform_create_proc - build and configure the hil_proc instance that
 * represents the remote processor.
 *
 * Creates the processor abstraction via hil_create_proc(), wires up the
 * IPI-based notification data for the VirtIO device and both vrings, names
 * RPMsg channel 0 and attaches the shared-memory region.
 *
 * @proc_index: index of the remote processor; forwarded to hil_create_proc().
 *
 * Return: the configured hil_proc on success, NULL if creation failed.
 */
struct hil_proc *platform_create_proc(int proc_index)
{
	struct hil_proc *hproc =
		hil_create_proc(&zynqmp_a53_a53_proc_ops,
				(unsigned long) proc_index, NULL);

	if (hproc == NULL)
		return NULL;

	/* Notification private data: the VirtIO device and both vrings all
	 * signal the peer through the same IPI channel info. */
	hil_set_vdev_ipi(hproc, 0, IPI_IRQ_VECT_ID, (void *)&chn_ipi_info[0]);
	hil_set_vring_ipi(hproc, 0, IPI_IRQ_VECT_ID, (void *)&chn_ipi_info[0]);
	hil_set_vring_ipi(hproc, 1, IPI_IRQ_VECT_ID, (void *)&chn_ipi_info[0]);

	/* Set name of RPMsg channel 0. */
	hil_set_rpmsg_channel(hproc, 0, RPMSG_CHAN_NAME);

	/* Expose the shared-memory device to libmetal and attach it. */
	metal_register_generic_device(&shm_dev);
	hil_set_shm(hproc, "generic", "shm", 0, SHM_SIZE);

	return hproc;
}
#ifndef PLATFORM_INFO_H_
#define PLATFORM_INFO_H_

#include "openamp/hil.h"

/* Interrupt vectors */
#define IPI_IRQ_VECT_ID XPAR_XIPIPSU_0_INT_ID

/* Name of RPMsg channel 0. */
#define RPMSG_CHAN_NAME "rpmsg-openamp-demo-channel"

/* NOTE(review): when MASTER_CORE_TRUE is not defined, MASTER_CORE stays
 * undefined and the "#if(MASTER_CORE == 1)" below relies on the preprocessor
 * evaluating the undefined identifier as 0 — works, but fragile; confirm
 * this is intentional (the .c file defines an explicit #else branch). */
#ifdef MASTER_CORE_TRUE
#define MASTER_CORE 1
#endif

#if(MASTER_CORE == 1)
/* Firmware table and binary remoteproc firmware symbols. */
extern unsigned char _binary_amp_remote_elf_start;
extern unsigned char _binary_amp_remote_elf_end;
#endif

/* Create and configure the hil_proc representing the remote processor;
 * implemented in platform_info.c. */
struct hil_proc *platform_create_proc(int proc_index);

#endif /* PLATFORM_INFO_H_ */
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of Mentor Graphics Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* This file populates resource table for BM remote
* for use by the Linux Master */
#include "openamp/open_amp.h"
#include "rsc_table.h"
/* Place resource table in special ELF section */
#define __section_t(S) __attribute__((__section__(#S)))
#define __resource __section_t(.resource_table)

/* Features advertised for the rpmsg VirtIO device entry below. */
#define RPMSG_IPU_C0_FEATURES 1

/* VirtIO rpmsg device id */
#define VIRTIO_ID_RPMSG_ 7

/* Remote supports Name Service announcement */
#define VIRTIO_RPMSG_F_NS 0

/* Two vrings (TX and RX), their alignment and fixed physical addresses
 * shared with the master. */
#define NUM_VRINGS 0x02
#define VRING_ALIGN 0x1000
#define RING_TX 0x3ED40000
#define RING_RX 0x3ED44000
#define VRING_SIZE 256

#define NUM_TABLE_ENTRIES 1
/* Static resource table handed to the Linux master: one VirtIO rpmsg
 * device entry with its two vrings. */
struct remote_resource_table __resource resources = {
	/* Version */
	1,

	/* Number of table entries */
	NUM_TABLE_ENTRIES,

	/* reserved fields */
	{0, 0,},

	/* Offsets of rsc entries */
	{
	 offsetof(struct remote_resource_table, rpmsg_vdev),
	},

	/* Virtio device entry */
	{RSC_VDEV, VIRTIO_ID_RPMSG_, 0, RPMSG_IPU_C0_FEATURES, 0, 0, 0,
	 NUM_VRINGS, {0, 0},
	},

	/* Vring rsc entry - part of vdev rsc entry */
	{RING_TX, VRING_ALIGN, VRING_SIZE, 1, 0},
	{RING_RX, VRING_ALIGN, VRING_SIZE, 2, 0},
};
void *get_resource_table (int rsc_id, int *len)
{
(void) rsc_id;
*len = sizeof(resources);
return &resources;
}
/*
* Copyright (c) 2014, Mentor Graphics Corporation
* All rights reserved.
* Copyright (c) 2015 Xilinx, Inc. All rights reserved.