Skip to content

Commit 13844dc

Browse files
committed
WIP: schedule: add multi-core support for user-space LL scheduler
- add arch_schedulers_get_for_core() for task-carried core routing
- route 8 task-aware scheduler callers via task->core under CONFIG_SOF_USERSPACE_LL
- fix zephyr_domain_thread_tid() to accept a core parameter instead of hardcoding core 0
- create domain threads for secondary cores in secondary_core_init()
- remove FIXME workaround from arch_schedulers_get()

Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
1 parent 8af1be4 commit 13844dc

9 files changed

Lines changed: 127 additions & 28 deletions

File tree

src/audio/pipeline/pipeline-schedule.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -291,7 +291,9 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx,
291291
* while pipeline state is being updated. The k_mutex is re-entrant
292292
* so schedule_task() calls inside the critical section are safe.
293293
*/
294-
zephyr_ll_lock_sched();
294+
int sched_core = ppl_data->start->ipc_config.core;
295+
296+
zephyr_ll_lock_sched(sched_core);
295297
#else
296298
uint32_t flags;
297299

@@ -358,7 +360,7 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx,
358360
}
359361
}
360362
#ifdef CONFIG_SOF_USERSPACE_LL
361-
zephyr_ll_unlock_sched();
363+
zephyr_ll_unlock_sched(sched_core);
362364
#else
363365
irq_local_enable(flags);
364366
#endif

src/include/sof/schedule/ll_schedule_domain.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -117,8 +117,8 @@ void zephyr_ll_task_free(struct task *task);
117117
struct k_heap *zephyr_ll_user_heap(void);
118118
void zephyr_ll_user_resources_init(void);
119119
void zephyr_ll_grant_access(struct k_thread *thread);
120-
void zephyr_ll_lock_sched(void);
121-
void zephyr_ll_unlock_sched(void);
120+
void zephyr_ll_lock_sched(int core);
121+
void zephyr_ll_unlock_sched(int core);
122122
#endif /* CONFIG_SOF_USERSPACE_LL */
123123

124124
static inline struct ll_schedule_domain *domain_init
@@ -317,7 +317,7 @@ struct ll_schedule_domain *zephyr_ll_domain(void);
317317
struct ll_schedule_domain *zephyr_domain_init(int clk);
318318
#define timer_domain_init(timer, clk) zephyr_domain_init(clk)
319319
#ifdef CONFIG_SOF_USERSPACE_LL
320-
struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain);
320+
struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain, int core);
321321
struct k_mem_domain *zephyr_ll_mem_domain(void);
322322
#endif /* CONFIG_SOF_USERSPACE_LL */
323323
#endif /* __ZEPHYR__ */

src/include/sof/schedule/schedule.h

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,10 @@ struct schedulers {
198198
*/
199199
struct schedulers **arch_schedulers_get(void);
200200

201+
#if CONFIG_SOF_USERSPACE_LL
202+
struct schedulers **arch_schedulers_get_for_core(int core);
203+
#endif
204+
201205
/**
202206
* Retrieves scheduler's data.
203207
* @param type SOF_SCHEDULE_ type.
@@ -218,10 +222,39 @@ static inline void *scheduler_get_data(uint16_t type)
218222
return NULL;
219223
}
220224

225+
#if CONFIG_SOF_USERSPACE_LL
226+
/**
227+
* Retrieves scheduler's data for a specific core.
228+
* @param type SOF_SCHEDULE_ type.
229+
* @param core Core ID to get scheduler data for.
230+
* @return Pointer to scheduler's data.
231+
*
232+
* Safe to call from user-space context — does not use cpu_get_id().
233+
*/
234+
static inline void *scheduler_get_data_for_core(uint16_t type, int core)
235+
{
236+
struct schedulers *schedulers = *arch_schedulers_get_for_core(core);
237+
struct schedule_data *sch;
238+
struct list_item *slist;
239+
240+
list_for_item(slist, &schedulers->list) {
241+
sch = container_of(slist, struct schedule_data, list);
242+
if (type == sch->type)
243+
return sch->data;
244+
}
245+
246+
return NULL;
247+
}
248+
#endif
249+
221250
/** See scheduler_ops::schedule_task_running */
222251
static inline int schedule_task_running(struct task *task)
223252
{
253+
#if CONFIG_SOF_USERSPACE_LL
254+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
255+
#else
224256
struct schedulers *schedulers = *arch_schedulers_get();
257+
#endif
225258
struct schedule_data *sch;
226259
struct list_item *slist;
227260

@@ -243,7 +276,11 @@ static inline int schedule_task_running(struct task *task)
243276
static inline int schedule_task(struct task *task, uint64_t start,
244277
uint64_t period)
245278
{
279+
#if CONFIG_SOF_USERSPACE_LL
280+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
281+
#else
246282
struct schedulers *schedulers = *arch_schedulers_get();
283+
#endif
247284
struct schedule_data *sch;
248285
struct list_item *slist;
249286

@@ -264,7 +301,11 @@ static inline int schedule_task(struct task *task, uint64_t start,
264301
static inline int schedule_task_before(struct task *task, uint64_t start,
265302
uint64_t period, struct task *before)
266303
{
304+
#if CONFIG_SOF_USERSPACE_LL
305+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
306+
#else
267307
struct schedulers *schedulers = *arch_schedulers_get();
308+
#endif
268309
struct schedule_data *sch;
269310
struct list_item *slist;
270311

@@ -290,7 +331,11 @@ static inline int schedule_task_before(struct task *task, uint64_t start,
290331
static inline int schedule_task_after(struct task *task, uint64_t start,
291332
uint64_t period, struct task *after)
292333
{
334+
#if CONFIG_SOF_USERSPACE_LL
335+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
336+
#else
293337
struct schedulers *schedulers = *arch_schedulers_get();
338+
#endif
294339
struct schedule_data *sch;
295340
struct list_item *slist;
296341

@@ -315,7 +360,11 @@ static inline int schedule_task_after(struct task *task, uint64_t start,
315360
/** See scheduler_ops::reschedule_task */
316361
static inline int reschedule_task(struct task *task, uint64_t start)
317362
{
363+
#if CONFIG_SOF_USERSPACE_LL
364+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
365+
#else
318366
struct schedulers *schedulers = *arch_schedulers_get();
367+
#endif
319368
struct schedule_data *sch;
320369
struct list_item *slist;
321370

@@ -337,7 +386,11 @@ static inline int reschedule_task(struct task *task, uint64_t start)
337386
/** See scheduler_ops::schedule_task_cancel */
338387
static inline int schedule_task_cancel(struct task *task)
339388
{
389+
#if CONFIG_SOF_USERSPACE_LL
390+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
391+
#else
340392
struct schedulers *schedulers = *arch_schedulers_get();
393+
#endif
341394
struct schedule_data *sch;
342395
struct list_item *slist;
343396

@@ -353,7 +406,11 @@ static inline int schedule_task_cancel(struct task *task)
353406
/** See scheduler_ops::schedule_task_free */
354407
static inline int schedule_task_free(struct task *task)
355408
{
409+
#if CONFIG_SOF_USERSPACE_LL
410+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
411+
#else
356412
struct schedulers *schedulers = *arch_schedulers_get();
413+
#endif
357414
struct schedule_data *sch;
358415
struct list_item *slist;
359416

@@ -402,7 +459,11 @@ static inline int schedulers_restore(void)
402459
/** See scheduler_ops::scheduler_init_context */
403460
static inline struct k_thread *scheduler_init_context(struct task *task)
404461
{
462+
#if CONFIG_SOF_USERSPACE_LL
463+
struct schedulers *schedulers = *arch_schedulers_get_for_core(task->core);
464+
#else
405465
struct schedulers *schedulers = *arch_schedulers_get();
466+
#endif
406467
struct schedule_data *sch;
407468
struct list_item *slist;
408469

src/init/init.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,10 @@
4848

4949
LOG_MODULE_REGISTER(init, CONFIG_SOF_LOG_LEVEL);
5050

51+
#if CONFIG_SOF_USERSPACE_LL
52+
SOF_DEFINE_REG_UUID(sec_core_init);
53+
#endif
54+
5155
/* main firmware context */
5256
static struct sof sof;
5357

@@ -136,6 +140,18 @@ __cold int secondary_core_init(struct sof *sof)
136140
return err;
137141
#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
138142

143+
#if CONFIG_SOF_USERSPACE_LL
144+
/* Create domain thread for this secondary core's LL scheduler */
145+
{
146+
struct task *task = zephyr_ll_task_alloc();
147+
148+
schedule_task_init_ll(task, SOF_UUID(sec_core_init_uuid),
149+
SOF_SCHEDULE_LL_TIMER,
150+
0, NULL, NULL, cpu_get_id(), 0);
151+
scheduler_init_context(task);
152+
}
153+
#endif
154+
139155
/* initialize IDC mechanism */
140156
trace_point(TRACE_BOOT_PLATFORM_IDC);
141157
err = idc_init();

src/ipc/ipc4/helper.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -613,15 +613,15 @@ __cold static struct comp_buffer *ipc4_create_buffer(struct comp_dev *src, bool
613613
if (cross_core_bind) \
614614
domain_block(sof_get()->platform_timer_domain); \
615615
else \
616-
zephyr_ll_lock_sched(); \
616+
zephyr_ll_lock_sched(cpu_get_id()); \
617617
} while (0)
618618

619619
#define ll_unblock(cross_core_bind, flags) \
620620
do { \
621621
if (cross_core_bind) \
622622
domain_unblock(sof_get()->platform_timer_domain); \
623623
else \
624-
zephyr_ll_unlock_sched(); \
624+
zephyr_ll_unlock_sched(cpu_get_id()); \
625625
} while (0)
626626
#else
627627
#define ll_block(cross_core_bind, flags) \
@@ -672,8 +672,8 @@ static int ll_wait_finished_on_core(struct comp_dev *dev)
672672
#else
673673

674674
#if CONFIG_SOF_USERSPACE_LL
675-
#define ll_block(cross_core_bind, flags) zephyr_ll_lock_sched()
676-
#define ll_unblock(cross_core_bind, flags) zephyr_ll_unlock_sched()
675+
#define ll_block(cross_core_bind, flags) zephyr_ll_lock_sched(cpu_get_id())
676+
#define ll_unblock(cross_core_bind, flags) zephyr_ll_unlock_sched(cpu_get_id())
677677
#else
678678
#define ll_block(cross_core_bind, flags) irq_local_disable(flags)
679679
#define ll_unblock(cross_core_bind, flags) irq_local_enable(flags)

src/schedule/zephyr_domain.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -477,13 +477,12 @@ static void zephyr_domain_thread_free(struct ll_schedule_domain *domain,
477477
tr_info(&ll_tr, "thread_free done, core %d", core);
478478
}
479479

480-
struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain)
480+
struct k_thread *zephyr_domain_thread_tid(struct ll_schedule_domain *domain, int core)
481481
{
482482
struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
483-
int core = 0; /* cpu_get_id(); */
484483
struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
485484

486-
tr_dbg(&ll_tr, "entry");
485+
tr_dbg(&ll_tr, "entry core %d", core);
487486

488487
return dt->ll_thread;
489488
}

src/schedule/zephyr_ll.c

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,15 @@ static void zephyr_ll_unlock(struct zephyr_ll *sch, uint32_t *flags)
6666

6767
static void zephyr_ll_assert_core(const struct zephyr_ll *sch)
6868
{
69-
assert(CONFIG_CORE_COUNT == 1 || IS_ENABLED(CONFIG_SOF_USERSPACE_LL) ||
70-
sch->core == cpu_get_id());
69+
#if CONFIG_SOF_USERSPACE_LL
70+
/* In user-space mode, cpu_get_id() is not available.
71+
* Core correctness is ensured by task->core routing in
72+
* schedule.h and verified at task schedule time.
73+
*/
74+
(void)sch;
75+
#else
76+
assert(CONFIG_CORE_COUNT == 1 || sch->core == cpu_get_id());
77+
#endif
7178
}
7279

7380
/* Locking: caller should hold the domain lock */
@@ -533,15 +540,16 @@ struct k_thread *zephyr_ll_init_context(void *data, struct task *task)
533540
}
534541

535542
if (!k_is_user_context()) {
536-
k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock);
543+
k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain, task->core),
544+
sch->lock);
537545

538-
tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock,
539-
zephyr_domain_thread_tid(sch->ll_domain));
546+
tr_dbg(&ll_tr, "granting access to lock %p for thread %p on core %d", sch->lock,
547+
zephyr_domain_thread_tid(sch->ll_domain, task->core), task->core);
540548
tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", &sch->ll_domain->lock,
541-
zephyr_domain_thread_tid(sch->ll_domain));
549+
zephyr_domain_thread_tid(sch->ll_domain, task->core));
542550
}
543551

544-
return zephyr_domain_thread_tid(sch->ll_domain);
552+
return zephyr_domain_thread_tid(sch->ll_domain, task->core);
545553
}
546554

547555
void zephyr_ll_free_context(void *data)
@@ -592,19 +600,21 @@ void zephyr_ll_grant_access(struct k_thread *thread)
592600
* schedule_task() calls within the locked section will not deadlock.
593601
* Must be paired with zephyr_ll_unlock_sched().
594602
*/
595-
void zephyr_ll_lock_sched(void)
603+
void zephyr_ll_lock_sched(int core)
596604
{
597-
struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER);
605+
struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data_for_core(SOF_SCHEDULE_LL_TIMER,
606+
core);
598607

599608
k_mutex_lock(sch->lock, K_FOREVER);
600609
}
601610

602611
/**
603612
* Unlock the LL scheduler after a previous zephyr_ll_lock_sched() call.
604613
*/
605-
void zephyr_ll_unlock_sched(void)
614+
void zephyr_ll_unlock_sched(int core)
606615
{
607-
struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data(SOF_SCHEDULE_LL_TIMER);
616+
struct zephyr_ll *sch = (struct zephyr_ll *)scheduler_get_data_for_core(SOF_SCHEDULE_LL_TIMER,
617+
core);
608618

609619
k_mutex_unlock(sch->lock);
610620
}

uuid-registry.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,7 @@ d7f6712d-131c-45a7-82ed6aa9dc2291ea pm_runtime
146146
9302adf5-88be-4234-a0a7dca538ef81f4 sai
147147
3dee06de-f25a-4e10-ae1fabc9573873ea schedule
148148
70d223ef-2b91-4aac-b444d89a0db2793a sdma
149+
bdcb1461-34f5-4047-b9cc70fdf8dfb234 sec_core_init
149150
55a88ed5-3d18-46ca-88f10ee6eae9930f selector
150151
32fe92c1-1e17-4fc2-9758c7f3542e980a selector4
151152
cf90d851-68a2-4987-a2de85aed0c8531c sgen_mt8186

zephyr/schedule.c

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -22,11 +22,21 @@ static APP_TASK_BSS struct schedulers *_schedulers[CONFIG_CORE_COUNT];
2222
*/
2323
struct schedulers **arch_schedulers_get(void)
2424
{
25-
if (k_is_user_context()) {
26-
printk("FIXME: using core0 scheduler\n");
27-
return _schedulers;
28-
}
29-
3025
return _schedulers + cpu_get_id();
3126
}
3227
EXPORT_SYMBOL(arch_schedulers_get);
28+
29+
#if CONFIG_SOF_USERSPACE_LL
30+
/**
31+
* Retrieves registered schedulers for a specific core.
32+
* @param core Core ID to get schedulers for.
33+
* @return List of registered schedulers for the specified core.
34+
*
35+
* Safe to call from user-space context — does not use cpu_get_id().
36+
*/
37+
struct schedulers **arch_schedulers_get_for_core(int core)
38+
{
39+
return _schedulers + core;
40+
}
41+
EXPORT_SYMBOL(arch_schedulers_get_for_core);
42+
#endif

0 commit comments

Comments
 (0)