
core: riscv: Refactor trap handler
In the current trap handler, we always save the trapped context into
struct thread_trap_regs and then copy it into other structures to handle
specific traps. For example, we invoke copy_trap_to_scall() to copy the
context from thread_trap_regs to thread_scall_regs to handle a system
call, and copy it back again after handling. This results in unnecessary
copies.

This commit determines the specific context structure during early trap
handling by checking the value of the XCAUSE CSR. For system calls, we
use the kernel stack to save the context (see ecall_from_user). For
interrupts, we use struct thread_ctx_regs to save the context (see
interrupt_from_user and interrupt_from_kernel). For other exceptions and
aborts, we use the abort stack or the temporary stack prepared in the
thread_core_local structure (see abort_from_user). The unnecessary
copies are removed.
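
For illustration, a minimal C-level sketch of the selection described above
follows; the actual dispatch happens in the assembly trap vector, the enum
and helper are invented for this sketch, and CAUSE_USER_ECALL is the
standard RISC-V code 8 for an environment call from U-mode.

/* Hypothetical sketch of the early dispatch; not part of the patch. */
#define CAUSE_USER_ECALL	8	/* environment call from U-mode */

enum trap_ctx {
	TRAP_CTX_SCALL,		/* struct thread_scall_regs on the kernel stack */
	TRAP_CTX_INTERRUPT,	/* struct thread_ctx_regs */
	TRAP_CTX_ABORT,		/* struct thread_abort_regs on abort/temporary stack */
};

static enum trap_ctx select_trap_ctx(unsigned long xcause)
{
	/* The top bit of XCAUSE is set when the trap is an interrupt */
	if ((long)xcause < 0)
		return TRAP_CTX_INTERRUPT;
	if (xcause == CAUSE_USER_ECALL)
		return TRAP_CTX_SCALL;
	return TRAP_CTX_ABORT;
}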

Signed-off-by: Alvin Chang <alvinga@andestech.com>
gagachang committed Apr 26, 2024
1 parent 7950274 commit 1935e2c
Showing 5 changed files with 483 additions and 292 deletions.
36 changes: 4 additions & 32 deletions core/arch/riscv/include/kernel/thread_arch.h
@@ -83,23 +83,21 @@ struct thread_abort_regs {
unsigned long t4;
unsigned long t5;
unsigned long t6;
unsigned long epc;
unsigned long status;
unsigned long ie;
unsigned long cause;
unsigned long epc;
unsigned long tval;
unsigned long satp;
};
} __aligned(16);

struct thread_trap_regs {
struct thread_scall_regs {
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
@@ -108,16 +106,6 @@ struct thread_trap_regs {
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
@@ -127,22 +115,6 @@ struct thread_trap_regs {
unsigned long ie;
} __aligned(16);

struct thread_scall_regs {
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long t0;
unsigned long t1;
unsigned long ra;
unsigned long sp;
unsigned long status;
} __aligned(16);

struct thread_ctx_regs {
unsigned long ra;
unsigned long sp;
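
Since these context frames are carved directly out of the active stack, the
__aligned(16) attribute keeps the stack pointer at the 16-byte alignment the
RISC-V calling convention requires. A minimal sketch of the compile-time
checks this permits, assuming the definitions from thread_arch.h are in
scope (much like the static_assert on struct thread_trap_regs that is
removed further below):

/* Sketch: frame sizes stay multiples of 16, so pushing one keeps sp aligned. */
static_assert(sizeof(struct thread_scall_regs) % 16 == 0);
static_assert(sizeof(struct thread_abort_regs) % 16 == 0);
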
5 changes: 2 additions & 3 deletions core/arch/riscv/include/kernel/thread_private_arch.h
@@ -49,9 +49,8 @@ struct thread_user_mode_rec {

extern long thread_user_kcode_offset;

void thread_trap_handler(long cause, unsigned long epc,
struct thread_trap_regs *regs,
bool user);
void thread_interrupt_handler(unsigned long cause,
struct thread_ctx_regs *regs);
/*
* Initializes TVEC for current hart. Called by thread_init_per_cpu()
*/
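
With the generic thread_trap_handler() C dispatcher gone, interrupts are
handed straight to thread_interrupt_handler(). A minimal usage sketch,
assuming the assembly interrupt entry has already saved the trapped context
into a struct thread_ctx_regs frame (the wrapper name is hypothetical):

/* Hypothetical wrapper; the real caller is the assembly interrupt entry. */
static void interrupt_entry_sketch(struct thread_ctx_regs *regs)
{
	unsigned long cause = read_csr(CSR_XCAUSE);

	thread_interrupt_handler(cause, regs);
}
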
43 changes: 27 additions & 16 deletions core/arch/riscv/kernel/asm-defines.c
@@ -43,6 +43,8 @@ DEFINES
DEFINE(THREAD_CTX_REG_IE, offsetof(struct thread_ctx_regs, ie));
DEFINE(THREAD_CTX_REG_RA, offsetof(struct thread_ctx_regs, ra));
DEFINE(THREAD_CTX_REG_SP, offsetof(struct thread_ctx_regs, sp));
DEFINE(THREAD_CTX_REG_GP, offsetof(struct thread_ctx_regs, gp));
DEFINE(THREAD_CTX_REG_TP, offsetof(struct thread_ctx_regs, tp));
DEFINE(THREAD_CTX_REG_T0, offsetof(struct thread_ctx_regs, t0));
DEFINE(THREAD_CTX_REG_S0, offsetof(struct thread_ctx_regs, s0));
DEFINE(THREAD_CTX_REG_A0, offsetof(struct thread_ctx_regs, a0));
@@ -63,27 +65,36 @@ DEFINES
offsetof(struct thread_user_mode_rec, x[6]));
DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));

/* struct thread_trap_regs */
DEFINE(THREAD_TRAP_REG_SP, offsetof(struct thread_trap_regs, sp));
DEFINE(THREAD_TRAP_REG_RA, offsetof(struct thread_trap_regs, ra));
DEFINE(THREAD_TRAP_REG_GP, offsetof(struct thread_trap_regs, gp));
DEFINE(THREAD_TRAP_REG_TP, offsetof(struct thread_trap_regs, tp));
DEFINE(THREAD_TRAP_REG_T0, offsetof(struct thread_trap_regs, t0));
DEFINE(THREAD_TRAP_REG_S0, offsetof(struct thread_trap_regs, s0));
DEFINE(THREAD_TRAP_REG_A0, offsetof(struct thread_trap_regs, a0));
DEFINE(THREAD_TRAP_REG_T3, offsetof(struct thread_trap_regs, t3));
DEFINE(THREAD_TRAP_REG_EPC, offsetof(struct thread_trap_regs, epc));
DEFINE(THREAD_TRAP_REG_STATUS,
offsetof(struct thread_trap_regs, status));
DEFINE(THREAD_TRAP_REG_IE, offsetof(struct thread_trap_regs, ie));
DEFINE(THREAD_TRAP_REGS_SIZE, sizeof(struct thread_trap_regs));
/* struct thread_abort_regs */
DEFINE(THREAD_ABT_REG_RA, offsetof(struct thread_abort_regs, ra));
DEFINE(THREAD_ABT_REG_SP, offsetof(struct thread_abort_regs, sp));
DEFINE(THREAD_ABT_REG_GP, offsetof(struct thread_abort_regs, gp));
DEFINE(THREAD_ABT_REG_TP, offsetof(struct thread_abort_regs, tp));
DEFINE(THREAD_ABT_REG_T0, offsetof(struct thread_abort_regs, t0));
DEFINE(THREAD_ABT_REG_S0, offsetof(struct thread_abort_regs, s0));
DEFINE(THREAD_ABT_REG_A0, offsetof(struct thread_abort_regs, a0));
DEFINE(THREAD_ABT_REG_S2, offsetof(struct thread_abort_regs, s2));
DEFINE(THREAD_ABT_REG_T3, offsetof(struct thread_abort_regs, t3));
DEFINE(THREAD_ABT_REG_EPC, offsetof(struct thread_abort_regs, epc));
DEFINE(THREAD_ABT_REG_STATUS,
offsetof(struct thread_abort_regs, status));
DEFINE(THREAD_ABT_REG_IE, offsetof(struct thread_abort_regs, ie));
DEFINE(THREAD_ABT_REG_CAUSE, offsetof(struct thread_abort_regs, cause));
DEFINE(THREAD_ABT_REG_TVAL, offsetof(struct thread_abort_regs, tval));
DEFINE(THREAD_ABT_REGS_SIZE, sizeof(struct thread_abort_regs));

/* struct thread_scall_regs */
DEFINE(THREAD_SCALL_REG_STATUS,
offsetof(struct thread_scall_regs, status));
DEFINE(THREAD_SCALL_REG_RA, offsetof(struct thread_scall_regs, ra));
DEFINE(THREAD_SCALL_REG_SP, offsetof(struct thread_scall_regs, sp));
DEFINE(THREAD_SCALL_REG_GP, offsetof(struct thread_scall_regs, gp));
DEFINE(THREAD_SCALL_REG_TP, offsetof(struct thread_scall_regs, tp));
DEFINE(THREAD_SCALL_REG_T0, offsetof(struct thread_scall_regs, t0));
DEFINE(THREAD_SCALL_REG_A0, offsetof(struct thread_scall_regs, a0));
DEFINE(THREAD_SCALL_REG_T3, offsetof(struct thread_scall_regs, t3));
DEFINE(THREAD_SCALL_REG_EPC, offsetof(struct thread_scall_regs, epc));
DEFINE(THREAD_SCALL_REG_STATUS,
offsetof(struct thread_scall_regs, status));
DEFINE(THREAD_SCALL_REG_IE, offsetof(struct thread_scall_regs, ie));
DEFINE(THREAD_SCALL_REGS_SIZE, sizeof(struct thread_scall_regs));

/* struct core_mmu_config */
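
The THREAD_*_REG_* constants emitted here are consumed by the assembly entry
code, which stores and reloads each register at a fixed offset from the base
of its context frame. A C-level sketch of what one such store amounts to,
with thread_arch.h in scope (the helper is hypothetical; the real code uses
assembly store instructions with these offsets):

#include <stddef.h>	/* offsetof() */

/* Hypothetical illustration: store a0 at its generated offset. */
static inline void save_a0(struct thread_abort_regs *regs, unsigned long a0)
{
	size_t off = offsetof(struct thread_abort_regs, a0);

	/* Same effect as regs->a0 = a0, spelled via the offset constant */
	*(unsigned long *)((char *)regs + off) = a0;
}
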
156 changes: 6 additions & 150 deletions core/arch/riscv/kernel/thread_arch.c
@@ -103,15 +103,15 @@ static void setup_unwind_user_mode(struct thread_scall_regs *regs)
regs->sp = thread_get_saved_thread_sp();
}

static void thread_unhandled_trap(struct thread_trap_regs *regs __unused,
unsigned long cause __unused)
static void thread_unhandled_trap(unsigned long cause __unused,
struct thread_ctx_regs *regs __unused)
{
DMSG("Unhandled trap xepc:0x%016lx xcause:0x%016lx xtval:0x%016lx",
read_csr(CSR_XEPC), read_csr(CSR_XCAUSE), read_csr(CSR_XTVAL));
panic();
}

void thread_scall_handler(struct thread_scall_regs *regs)
{
struct ts_session *sess = NULL;
uint32_t state = 0;
@@ -137,171 +137,28 @@ void thread_scall_handler(struct thread_scall_regs *regs)
}
}

static void copy_scall_to_trap(struct thread_scall_regs *scall_regs,
struct thread_trap_regs *trap_regs)
{
trap_regs->a0 = scall_regs->a0;
trap_regs->a1 = scall_regs->a1;
trap_regs->a2 = scall_regs->a2;
trap_regs->a3 = scall_regs->a3;
trap_regs->a4 = scall_regs->a4;
trap_regs->a5 = scall_regs->a5;
trap_regs->a6 = scall_regs->a6;
trap_regs->a7 = scall_regs->a7;
trap_regs->t0 = scall_regs->t0;
trap_regs->t1 = scall_regs->t1;
}

static void copy_trap_to_scall(struct thread_trap_regs *trap_regs,
struct thread_scall_regs *scall_regs)
{
*scall_regs = (struct thread_scall_regs) {
.status = trap_regs->status,
.ra = trap_regs->ra,
.a0 = trap_regs->a0,
.a1 = trap_regs->a1,
.a2 = trap_regs->a2,
.a3 = trap_regs->a3,
.a4 = trap_regs->a4,
.a5 = trap_regs->a5,
.a6 = trap_regs->a6,
.a7 = trap_regs->a7,
.t0 = trap_regs->t0,
.t1 = trap_regs->t1,
};
}

static void thread_user_ecall_handler(struct thread_trap_regs *trap_regs)
{
struct thread_scall_regs scall_regs;
struct thread_core_local *l = thread_get_core_local();
int ct = l->curr_thread;

copy_trap_to_scall(trap_regs, &scall_regs);
thread_scall_handler(&scall_regs);
copy_scall_to_trap(&scall_regs, trap_regs);
/*
* Save kernel sp we'll had at the beginning of this function.
* This is when this TA has called another TA because
* __thread_enter_user_mode() also saves the stack pointer in this
* field.
*/
threads[ct].kern_sp = (unsigned long)(trap_regs + 1);
/*
* We are returning to U-Mode, on return, the program counter
* is set to xsepc (pc=xepc), we add 4 (size of an instruction)
* to continue to next instruction.
*/
trap_regs->epc += 4;
}

static void copy_trap_to_abort(struct thread_trap_regs *trap_regs,
struct thread_abort_regs *abort_regs)
{
*abort_regs = (struct thread_abort_regs) {
.status = trap_regs->status,
.ra = trap_regs->ra,
.sp = trap_regs->sp,
.gp = trap_regs->gp,
.tp = trap_regs->tp,
.t0 = trap_regs->t0,
.t1 = trap_regs->t1,
.t2 = trap_regs->t2,
.s0 = trap_regs->s0,
.s1 = trap_regs->s1,
.a0 = trap_regs->a0,
.a1 = trap_regs->a1,
.a2 = trap_regs->a2,
.a3 = trap_regs->a3,
.a4 = trap_regs->a4,
.a5 = trap_regs->a5,
.a6 = trap_regs->a6,
.a7 = trap_regs->a7,
.s2 = trap_regs->s2,
.s3 = trap_regs->s3,
.s4 = trap_regs->s4,
.s5 = trap_regs->s5,
.s6 = trap_regs->s6,
.s7 = trap_regs->s7,
.s8 = trap_regs->s8,
.s9 = trap_regs->s9,
.s10 = trap_regs->s10,
.s11 = trap_regs->s11,
.t3 = trap_regs->t3,
.t4 = trap_regs->t4,
.t5 = trap_regs->t5,
.t6 = trap_regs->t6,
};
}

static void thread_abort_handler(struct thread_trap_regs *trap_regs,
unsigned long cause)
{
struct thread_abort_regs abort_regs = { };

assert(cause == read_csr(CSR_XCAUSE));
copy_trap_to_abort(trap_regs, &abort_regs);
abort_regs.cause = read_csr(CSR_XCAUSE);
abort_regs.epc = read_csr(CSR_XEPC);
abort_regs.tval = read_csr(CSR_XTVAL);
abort_regs.satp = read_csr(CSR_SATP);
abort_handler(cause, &abort_regs);
}

static void thread_exception_handler(unsigned long cause,
struct thread_trap_regs *regs)
{
switch (cause) {
case CAUSE_USER_ECALL:
thread_user_ecall_handler(regs);
break;
default:
thread_abort_handler(regs, cause);
break;
}
}

static void thread_irq_handler(void)
{
interrupt_main_handler();
}

static void thread_interrupt_handler(unsigned long cause,
struct thread_trap_regs *regs)
void thread_interrupt_handler(unsigned long cause, struct thread_ctx_regs *regs)
{
switch (cause & LONG_MAX) {
case IRQ_XTIMER:
clear_csr(CSR_XIE, CSR_XIE_TIE);
break;
case IRQ_XSOFT:
thread_unhandled_trap(regs, cause);
thread_unhandled_trap(cause, regs);
break;
case IRQ_XEXT:
thread_irq_handler();
break;
default:
thread_unhandled_trap(regs, cause);
thread_unhandled_trap(cause, regs);
}
}

void thread_trap_handler(long cause, unsigned long epc __unused,
struct thread_trap_regs *regs,
bool user __maybe_unused)
{
/*
* The Interrupt bit (XLEN-1) in the cause register is set
* if the trap was caused by an interrupt.
*/
if (cause < 0)
thread_interrupt_handler(cause, regs);
/*
* Otherwise, cause is never written by the implementation,
* though it may be explicitly written by software.
*/
else
thread_exception_handler(cause, regs);
}

unsigned long xstatus_for_xret(uint8_t pie, uint8_t pp)
{
unsigned long xstatus = read_csr(CSR_XSTATUS);
@@ -611,7 +468,6 @@ void thread_init_tvec(void)
{
unsigned long tvec = (unsigned long)get_trap_vect();

static_assert(sizeof(struct thread_trap_regs) % 16 == 0);
write_csr(CSR_XTVEC, tvec);
assert(read_csr(CSR_XTVEC) == tvec);
}
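
With the copy helpers removed, the system-call path now works directly on
the struct thread_scall_regs frame that ecall_from_user saves on the kernel
stack. A hedged C-level sketch of that flow, modeled on the removed
thread_user_ecall_handler() above (the equivalent steps now live in the
assembly ecall path):

/* Sketch only; the save, bookkeeping and return are done in assembly. */
static void user_ecall_sketch(struct thread_scall_regs *regs)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_scall_handler(regs);
	/* Kernel sp to restore when this TA has called another TA */
	threads[ct].kern_sp = (unsigned long)(regs + 1);
	/* Return to the instruction after the ecall in U-mode */
	regs->epc += 4;
}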
