Commit 3ba1c6b1 authored by Oliver Horst

[chg] Prefixed macro definitions

parent e0951ba6
 #ifndef XIL_CPU_H
 #define XIL_CPU_H
-#define CORE_MASK(x) ((uintptr_t)(1u << x))
+#define XCORE_MASK(x) ((uintptr_t)(1u << x))
 extern void __AMP_NUM_CORES;
 extern void __AMP_CORE_MASK;
 extern void __AMP_SLAVE_MASK;
-#define AMP_NUM_CORES_U64 ((uintptr_t)(&__AMP_NUM_CORES))
-#define AMP_SLAVE_MASK_U64 ((uintptr_t)(&__AMP_SLAVE_MASK))
-#define AMP_CORE_MASK_U64 ((uintptr_t)(&__AMP_CORE_MASK))
+#define XAMP_NUM_CORES_U64 ((uintptr_t)(&__AMP_NUM_CORES))
+#define XAMP_SLAVE_MASK_U64 ((uintptr_t)(&__AMP_SLAVE_MASK))
+#define XAMP_CORE_MASK_U64 ((uintptr_t)(&__AMP_CORE_MASK))
 #define Xil_CpuDisableInterrupts() \
     __asm volatile ( "MSR DAIFSET, #2" ::: "memory" ); \
...
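The hunk above renames CORE_MASK to XCORE_MASK and gives the AMP_*_U64 constants an X prefix. The __AMP_* externs are linker-script symbols whose addresses carry the actual configuration values, which is why the macros take their address and cast it to uintptr_t; Xil_CpuDisableInterrupts() executes MSR DAIFSET, #2, which sets the I flag in DAIF and thereby masks IRQs. As a rough illustration only (not part of the commit), the prefixed macros could be used as below; prvIsSlaveCore and prvCountCores are made-up helper names, "xil_cpu.h" is an assumed file name for the header shown above, and the snippet presumes the linker provides the __AMP_* symbols:

#include <stdbool.h>
#include <stdint.h>
#include "xil_cpu.h"   /* assumed header name for the file excerpted above */

/* True if the core with index uxCpuId is flagged as a slave core. */
static inline bool prvIsSlaveCore( uintptr_t uxCpuId )
{
    return ( XAMP_SLAVE_MASK_U64 & XCORE_MASK( uxCpuId ) ) != 0u;
}

/* Counts the bits set in the overall core mask; with a consistent linker
 * configuration this should equal XAMP_NUM_CORES_U64. */
static inline uintptr_t prvCountCores( void )
{
    uintptr_t uxMask = XAMP_CORE_MASK_U64;
    uintptr_t uxCount = 0u;

    while ( uxMask != 0u )
    {
        uxCount += uxMask & 1u;
        uxMask >>= 1u;
    }

    return uxCount;
}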
@@ -45,7 +45,7 @@ void Xil_CpuBarrier()
 /* We mark our core as ready to sync by flipping the corresponding bit in
  * the barrier 1 mask. */
-atomic_fetch_xor( &XCpuBarrierMask1, CORE_MASK( uxCpuId ) );
+atomic_fetch_xor( &XCpuBarrierMask1, XCORE_MASK( uxCpuId ) );
 /* Test whether all cores are ready to sync. If not we are entering the low
  * power state and stall the execution until the last core wakes all other
@@ -77,14 +77,14 @@ void Xil_CpuBarrier()
 /* We mark our core as synced by flipping the corresponding bit in
  * the barrier 1 mask. */
-atomic_fetch_xor( &XCpuBarrierMask2, CORE_MASK( uxCpuId ) );
+atomic_fetch_xor( &XCpuBarrierMask2, XCORE_MASK( uxCpuId ) );
 /* Test whether all cores are synced. If not we are entering the low power
  * state and stall the execution until the last core wakes all other cores
  * again. */
 while ( 1 )
 {
-    if ( mask1 ^ atomic_load( &XCpuBarrierMask2 ) != ( AMP_CORE_MASK_U64 ) )
+    if ( mask1 ^ atomic_load( &XCpuBarrierMask2 ) != ( XAMP_CORE_MASK_U64 ) )
     {
         Xil_CpuWaitForEvent();
     }
...
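The barrier fragments above flip a per-core bit in an atomic mask for each synchronisation phase and park waiting cores in a low-power state via Xil_CpuWaitForEvent() until the last core arrives. The snippet below is a simplified, self-contained sketch of that flip-a-bit/wait-for-event pattern, not a reconstruction of Xil_CpuBarrier(): xArrivedMask, xGeneration, prvWaitForEvent, prvSendEvent and prvBarrierSketch are invented names, a generation counter stands in for the second barrier mask used in the real code, and the X-prefixed macros are assumed to come from the header excerpted above.

#include <stdatomic.h>
#include <stdint.h>
#include "xil_cpu.h"   /* assumed header name; provides XCORE_MASK and XAMP_CORE_MASK_U64 */

static atomic_uintptr_t xArrivedMask;  /* one bit per core that has reached the barrier */
static atomic_uint xGeneration;        /* bumped by the last core to arrive              */

/* AArch64 low-power wait and wake-up; the original code wraps the former as
 * Xil_CpuWaitForEvent(). On real hardware a DSB before SEV would be needed
 * to make the preceding stores visible to the woken cores first. */
static inline void prvWaitForEvent( void ) { __asm volatile ( "WFE" ::: "memory" ); }
static inline void prvSendEvent( void )    { __asm volatile ( "SEV" ::: "memory" ); }

void prvBarrierSketch( uintptr_t uxCpuId )
{
    unsigned int uxGen = atomic_load( &xGeneration );

    /* Announce arrival by setting this core's bit in the arrival mask. */
    uintptr_t uxPrev = atomic_fetch_or( &xArrivedMask, XCORE_MASK( uxCpuId ) );

    if ( ( uxPrev | XCORE_MASK( uxCpuId ) ) == XAMP_CORE_MASK_U64 )
    {
        /* Last core: re-arm the mask for the next barrier and wake the others. */
        atomic_store( &xArrivedMask, 0u );
        atomic_fetch_add( &xGeneration, 1u );
        prvSendEvent();
    }
    else
    {
        /* Sleep until the last core bumps the generation counter. */
        while ( atomic_load( &xGeneration ) == uxGen )
        {
            prvWaitForEvent();
        }
    }
}

In this sketch the generation counter is what re-arms the barrier for reuse; in the committed code, XCpuBarrierMask2 appears to serve that role through a second flip-and-wait phase.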