Commit 6706fe6: ArmPkg/ArmLib: Drop set/way Dcache operations
ArmPkg/ArmLib: Drop set/way Dcache operations
Cache maintenance operations by set/way are not broadcast, and operate
only on individual architected caches. This makes them suitable only for
enabling or disabling entire cache levels, which is the job of secure
firmware, and which must be carried out while the CPU in question is not
taking part in the cache coherency protocol.

Managing the clean/dirty state of a memory range can only be done using
cache maintenance by virtual address.

So drop the set/way handling from ArmLib for ARM and AARCH64, as there
is no context where it can be used correctly from EDK2.

Signed-off-by: Ard Biesheuvel <[email protected]>
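[Editor's note: for reference, the by-MVA primitives that ArmLib retains are
what callers should use to manage the clean/dirty state of a memory range.
A minimal sketch (not part of this commit), assuming the
ArmCleanDataCacheEntryByMVA, ArmDataCacheLineLength and
ArmDataSynchronizationBarrier declarations from
ArmPkg/Include/Library/ArmLib.h; the helper name CleanDataCacheRangeByMva is
hypothetical:

#include <Library/ArmLib.h>

VOID
CleanDataCacheRangeByMva (
  IN VOID   *Address,
  IN UINTN  Length
  )
{
  UINTN  LineLength;
  UINTN  Start;
  UINTN  End;

  LineLength = ArmDataCacheLineLength ();
  End        = (UINTN)Address + Length;
  Start      = (UINTN)Address & ~(LineLength - 1);  // align down to a cache line

  while (Start < End) {
    ArmCleanDataCacheEntryByMVA (Start);            // DC CVAC (AArch64) per line
    Start += LineLength;
  }

  ArmDataSynchronizationBarrier ();                 // complete the cleans before use
}

Unlike the set/way walks removed below, by-MVA operations are broadcast within
the shareability domain, so they remain correct while the CPU participates in
coherency.]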
ardbiesheuvel authored and mergify[bot] committed Sep 13, 2024
1 parent bec02ea commit 6706fe6
Showing 6 changed files with 0 additions and 306 deletions.
55 changes: 0 additions & 55 deletions ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
@@ -18,61 +18,6 @@
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"

VOID
AArch64DataCacheOperation (
  IN AARCH64_CACHE_OPERATION  DataCacheOperation
  )
{
  UINTN  SavedInterruptState;

  SavedInterruptState = ArmGetInterruptState ();
  ArmDisableInterrupts ();

  AArch64AllDataCachesOperation (DataCacheOperation);

  ArmDataSynchronizationBarrier ();

  if (SavedInterruptState) {
    ArmEnableInterrupts ();
  }
}

VOID
EFIAPI
ArmInvalidateDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  AArch64DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);
}

VOID
EFIAPI
ArmCleanInvalidateDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  AArch64DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
}

VOID
EFIAPI
ArmCleanDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
}

/**
  Check whether the CPU supports the GIC system register interface (any version)
27 changes: 0 additions & 27 deletions ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
@@ -11,33 +11,6 @@
#ifndef AARCH64_LIB_H_
#define AARCH64_LIB_H_

typedef VOID (*AARCH64_CACHE_OPERATION)(
  UINTN
  );

VOID
AArch64AllDataCachesOperation (
  IN AARCH64_CACHE_OPERATION  DataCacheOperation
  );

VOID
EFIAPI
ArmInvalidateDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

VOID
EFIAPI
ArmCleanDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

VOID
EFIAPI
ArmCleanInvalidateDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

UINTN
EFIAPI
ArmReadIdAA64Dfr0 (
75 changes: 0 additions & 75 deletions ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
@@ -43,22 +43,6 @@ ASM_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
  dc      civac, x0    // Clean and invalidate single data cache line
  ret


ASM_FUNC(ArmInvalidateDataCacheEntryBySetWay)
  dc      isw, x0      // Invalidate this line
  ret


ASM_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
  dc      cisw, x0     // Clean and Invalidate this line
  ret


ASM_FUNC(ArmCleanDataCacheEntryBySetWay)
  dc      csw, x0      // Clean this line
  ret


ASM_FUNC(ArmInvalidateInstructionCache)
  ic      iallu        // Invalidate entire instruction cache
  dsb     sy
@@ -257,65 +241,6 @@ ASM_FUNC(ArmDisableBranchPrediction)
  ret


ASM_FUNC(AArch64AllDataCachesOperation)
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack. - The stack must always be quad-word aligned
  stp   x29, x30, [sp, #-16]!
  mov   x29, sp
  mov   x1, x0                  // Save Function call in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x7000000      // Mask out all but Level of Coherency (LoC)
  lsr   x3, x3, #23             // Left align cache level value - the level is shifted by 1 to the
                                // right to ease the access to CSSELR and the Set/Way operation.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0

Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
  lsr   x12, x6, x2             // bottom 3 bits are the Cache type for this level
  and   x12, x12, #7            // get those 3 bits alone
  cmp   x12, #2                 // what cache at this level?
  b.lt  L_Skip                  // no cache or only instruction cache at this level
  msr   csselr_el1, x10         // write the Cache Size selection register with current level (CSSELR)
  isb                           // isb to sync the change to the CacheSizeID reg
  mrs   x12, ccsidr_el1         // reads current Cache Size ID register (CCSIDR)
  and   x2, x12, #0x7           // extract the line length field
  add   x2, x2, #4              // add 4 for the line length offset (log2 16 bytes)
  mov   x4, #0x400
  sub   x4, x4, #1
  and   x4, x4, x12, lsr #3     // x4 is the max number on the way size (right aligned)
  clz   w5, w4                  // w5 is the bit position of the way size increment
  mov   x7, #0x00008000
  sub   x7, x7, #1
  and   x7, x7, x12, lsr #13    // x7 is the max number of the index size (right aligned)

Loop2:
  mov   x9, x4                  // x9 working copy of the max way size (right aligned)

Loop3:
  lsl   x11, x9, x5
  orr   x0, x10, x11            // factor in the way number and cache number
  lsl   x11, x7, x2
  orr   x0, x0, x11             // factor in the index number

  blr   x1                      // Goto requested cache operation

  subs  x9, x9, #1              // decrement the way number
  b.ge  Loop3
  subs  x7, x7, #1              // decrement the index
  b.ge  Loop2
L_Skip:
  add   x10, x10, #2            // increment the cache number
  cmp   x3, x10
  b.gt  Loop1

L_Finished:
  dsb   sy
  isb
  ldp   x29, x30, [sp], #0x10
  ret


ASM_FUNC(ArmDataMemoryBarrier)
  dmb   sy
  ret
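[Editor's note: for reference, the operand that the deleted
AArch64AllDataCachesOperation loop assembled for the DC ISW/CSW/CISW
instructions packs the way index, the set index, and the cache level into a
single register. A hedged C restatement of that encoding (the function name
and parameters are illustrative, not EDK2 API):

UINTN
SetWayOperand (
  IN UINTN  Level,        // 0-based cache level, placed in bits [3:1]
  IN UINTN  Set,          // set index, shifted up by log2(line length)
  IN UINTN  Way,          // way index, shifted up to the top of the register
  IN UINTN  LineLenLog2,  // log2 of the line length in bytes (CCSIDR.LineSize + 4)
  IN UINTN  WayShift      // CLZ of the maximum way index, as the loop computes with clz
  )
{
  return (Way << WayShift) | (Set << LineLenLog2) | (Level << 1);
}]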
55 changes: 0 additions & 55 deletions ArmPkg/Library/ArmLib/Arm/ArmV7Lib.c
@@ -18,61 +18,6 @@
#include "ArmV7Lib.h"
#include "ArmLibPrivate.h"

VOID
ArmV7DataCacheOperation (
  IN ARM_V7_CACHE_OPERATION  DataCacheOperation
  )
{
  UINTN  SavedInterruptState;

  SavedInterruptState = ArmGetInterruptState ();
  ArmDisableInterrupts ();

  ArmV7AllDataCachesOperation (DataCacheOperation);

  ArmDataSynchronizationBarrier ();

  if (SavedInterruptState) {
    ArmEnableInterrupts ();
  }
}

VOID
EFIAPI
ArmInvalidateDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  ArmV7DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);
}

VOID
EFIAPI
ArmCleanInvalidateDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  ArmV7DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
}

VOID
EFIAPI
ArmCleanDataCache (
  VOID
  )
{
  ASSERT (!ArmMmuEnabled ());

  ArmDataSynchronizationBarrier ();
  ArmV7DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
}

/**
  Check whether the CPU supports the GIC system register interface (any version)
27 changes: 0 additions & 27 deletions ArmPkg/Library/ArmLib/Arm/ArmV7Lib.h
@@ -23,33 +23,6 @@
#define ID_MMFR0_SHR_IMP_HW_COHERENT 1
#define ID_MMFR0_SHR_IGNORED 0xf

typedef VOID (*ARM_V7_CACHE_OPERATION)(
  UINT32
  );

VOID
ArmV7AllDataCachesOperation (
  IN ARM_V7_CACHE_OPERATION  DataCacheOperation
  );

VOID
EFIAPI
ArmInvalidateDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

VOID
EFIAPI
ArmCleanDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

VOID
EFIAPI
ArmCleanInvalidateDataCacheEntryBySetWay (
  IN UINTN  SetWayFormat
  );

/** Reads the ID_MMFR4 register.
  @return The contents of the ID_MMFR4 register.
67 changes: 0 additions & 67 deletions ArmPkg/Library/ArmLib/Arm/ArmV7Support.S
@@ -42,20 +42,6 @@ ASM_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
  bx      lr


ASM_FUNC(ArmInvalidateDataCacheEntryBySetWay)
  mcr     p15, 0, r0, c7, c6, 2   @ Invalidate this line
  bx      lr


ASM_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
  mcr     p15, 0, r0, c7, c14, 2  @ Clean and Invalidate this line
  bx      lr


ASM_FUNC(ArmCleanDataCacheEntryBySetWay)
  mcr     p15, 0, r0, c7, c10, 2  @ Clean this line
  bx      lr

ASM_FUNC(ArmInvalidateInstructionCache)
  mcr     p15,0,R0,c7,c5,0        @Invalidate entire instruction cache
  dsb
@@ -171,59 +157,6 @@ ASM_FUNC(ArmSetHighVectors)
  isb
  bx      LR

ASM_FUNC(ArmV7AllDataCachesOperation)
  stmfd SP!,{r4-r12, LR}
  mov   R1, R0                  @ Save Function call in R1
  mrc   p15, 1, R6, c0, c0, 1   @ Read CLIDR
  ands  R3, R6, #0x7000000      @ Mask out all but Level of Coherency (LoC)
  mov   R3, R3, LSR #23         @ Cache level value (naturally aligned)
  beq   L_Finished
  mov   R10, #0

Loop1:
  add   R2, R10, R10, LSR #1    @ Work out 3x cachelevel
  mov   R12, R6, LSR R2         @ bottom 3 bits are the Cache type for this level
  and   R12, R12, #7            @ get those 3 bits alone
  cmp   R12, #2
  blt   L_Skip                  @ no cache or only instruction cache at this level
  mcr   p15, 2, R10, c0, c0, 0  @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
  isb                           @ isb to sync the change to the CacheSizeID reg
  mrc   p15, 1, R12, c0, c0, 0  @ reads current Cache Size ID register (CCSIDR)
  and   R2, R12, #0x7           @ extract the line length field
  add   R2, R2, #4              @ add 4 for the line length offset (log2 16 bytes)
@ ldr   R4, =0x3FF
  mov   R4, #0x400
  sub   R4, R4, #1
  ands  R4, R4, R12, LSR #3     @ R4 is the max number on the way size (right aligned)
  clz   R5, R4                  @ R5 is the bit position of the way size increment
@ ldr   R7, =0x00007FFF
  mov   R7, #0x00008000
  sub   R7, R7, #1
  ands  R7, R7, R12, LSR #13    @ R7 is the max number of the index size (right aligned)

Loop2:
  mov   R9, R4                  @ R9 working copy of the max way size (right aligned)

Loop3:
  orr   R0, R10, R9, LSL R5     @ factor in the way number and cache number into R11
  orr   R0, R0, R7, LSL R2      @ factor in the index number

  blx   R1

  subs  R9, R9, #1              @ decrement the way number
  bge   Loop3
  subs  R7, R7, #1              @ decrement the index
  bge   Loop2
L_Skip:
  add   R10, R10, #2            @ increment the cache number
  cmp   R3, R10
  bgt   Loop1

L_Finished:
  dsb
  ldmfd SP!, {r4-r12, lr}
  bx    LR

ASM_FUNC(ArmDataMemoryBarrier)
  dmb
  bx    LR
