From: eb
Subject: [Commit-gnuradio] r10215 - in gnuradio/branches/developers/eb/gcell-wip/gcell: apps lib/runtime lib/runtime/spu
Date: Tue, 13 Jan 2009 18:55:17 -0700 (MST)
Author: eb
Date: 2009-01-13 18:55:06 -0700 (Tue, 13 Jan 2009)
New Revision: 10215
Modified:
gnuradio/branches/developers/eb/gcell-wip/gcell/apps/benchmark_roundtrip.cc
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.cc
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.h
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/spu/gc_main.c
Log:
work-in-progress: cuts gcell round trip time in half.
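In outline: instead of having each SPU raise an interrupting-mailbox event and letting the libspe2 event-handler thread deliver job-completion messages, the job manager now maps each SPE's problem-state control area into user space (SPE_MAP_PS / spe_ps_area_get) and a new job-completer thread polls the outbound mailbox registers directly. The sketch below shows that polling idiom in isolation; it is a minimal illustration assuming a libspe2 context already created with SPE_MAP_PS, not code from the tree:

    #include <libspe2.h>
    #include <stdio.h>

    /* Drain an SPE's outbound (non-interrupting) mailbox by reading the
     * memory-mapped problem-state control area directly -- no syscall or
     * interrupt on the fast path.
     */
    static void drain_out_mbox(spe_context_ptr_t ctx)
    {
      volatile spe_spu_control_area_t *ctrl =
        (spe_spu_control_area_t *) spe_ps_area_get(ctx, SPE_CONTROL_AREA);
      if (ctrl == NULL)
        return;                       /* context lacks SPE_MAP_PS */

      /* The low byte of SPU_Mbox_Stat is the number of pending entries. */
      int nentries = ctrl->SPU_Mbox_Stat & 0xFF;
      while (nentries-- > 0){
        unsigned int msg = ctrl->SPU_Out_Mbox;   /* reading pops one entry */
        printf("msg = 0x%08x\n", msg);
      }
    }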
Modified:
gnuradio/branches/developers/eb/gcell-wip/gcell/apps/benchmark_roundtrip.cc
===================================================================
--- gnuradio/branches/developers/eb/gcell-wip/gcell/apps/benchmark_roundtrip.cc	2009-01-13 22:05:56 UTC (rev 10214)
+++ gnuradio/branches/developers/eb/gcell-wip/gcell/apps/benchmark_roundtrip.cc	2009-01-14 01:55:06 UTC (rev 10215)
@@ -139,7 +139,7 @@
gbi += dma_size;
}
- int niter = 1000;
+ int niter = 100000;
omni_time t_start = omni_time::time();
for (int iter = 0; iter < niter; iter++){
Modified:
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.cc
===================================================================
--- gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.cc	2009-01-13 22:05:56 UTC (rev 10214)
+++ gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.cc	2009-01-14 01:55:06 UTC (rev 10215)
@@ -99,6 +99,7 @@
: d_debug(0), d_spu_args(0),
d_eh_cond(&d_eh_mutex), d_eh_thread(0), d_eh_state(EHS_INIT),
d_shutdown_requested(false),
+ d_jc_cond(&d_jc_mutex), d_jc_thread(0), d_jc_njobs_active(0),
d_client_thread(0), d_ea_args_maxsize(0),
d_proc_def(0), d_proc_def_ls_addr(0), d_nproc_defs(0)
{
@@ -218,6 +219,7 @@
// fprintf(stderr, "d_proc_def_ls_addr = 0x%0x\n", d_proc_def_ls_addr);
int spe_flags = (SPE_EVENTS_ENABLE
+ | SPE_MAP_PS
| SPE_CFG_SIGNOTIFY1_OR
| SPE_CFG_SIGNOTIFY2_OR);
@@ -228,6 +230,14 @@
perror("spe_context_create");
throw std::runtime_error("spe_context_create");
}
+
+ d_worker[i].spe_ctrl =
+ (spe_spu_control_area_t *)spe_ps_area_get(d_worker[i].spe_ctx, SPE_CONTROL_AREA);
+ if (d_worker[i].spe_ctrl == 0){
+ perror("spe_ps_area_get(SPE_CONTROL_AREA)");
+ throw std::runtime_error("spe_ps_area_get(SPE_CONTROL_AREA)");
+ }
+
d_worker[i].spe_idx = i;
d_worker[i].spu_args = &d_spu_args[i];
d_worker[i].spu_args->queue = ptr_to_ea(d_queue);
@@ -315,7 +325,6 @@
// create the spe event handler & worker (SPE) threads
create_event_handler();
-
}
////////////////////////////////////////////////////////////////////////
@@ -340,6 +349,7 @@
omni_mutex_lock l(d_eh_mutex);
d_shutdown_requested = true; // set flag for event handler thread
+ d_jc_cond.signal(); // wake up job completer
// should only happen during early QA code
if (d_eh_thread == 0 && d_eh_state == EHS_INIT)
@@ -420,6 +430,24 @@
////////////////////////////////////////////////////////////////////////
+
+inline void
+gc_job_manager_impl::incr_njobs_active()
+{
+ omni_mutex_lock l(d_jc_mutex);
+
+ if (d_jc_njobs_active++ == 0) // signal on 0 to 1 transition
+ d_jc_cond.signal();
+}
+
+inline void
+gc_job_manager_impl::decr_njobs_active(int n)
+{
+ omni_mutex_lock l(d_jc_mutex);
+ d_jc_njobs_active -= n;
+}
+
+
/*
* We check as much as we can here on the PPE side, so that the SPE
* doesn't have to.
@@ -522,8 +550,7 @@
jd->status = JS_OK;
jd->sys.client_id = cti->d_client_id;
- // FIXME keep count of jobs in progress?
-
+ incr_njobs_active();
gc_jd_queue_enqueue(d_queue, jd);
return true;
}
@@ -685,6 +712,14 @@
return 0;
}
+static void *
+start_job_completer(void *arg)
+{
+ gc_job_manager_impl *p = (gc_job_manager_impl *) arg;
+ p->job_completer_loop();
+ return 0;
+}
+
void
gc_job_manager_impl::create_event_handler()
{
@@ -709,12 +744,18 @@
}
}
- // create our event handling thread
+ // create the event handling thread
if (!start_thread(&d_eh_thread, start_event_handler, this, "event_handler")){
throw std::runtime_error("pthread_create");
}
+ // create the job completion thread
+
+ if (!start_thread(&d_jc_thread, start_job_completer, this, "job_completer")){
+ throw std::runtime_error("pthread_create");
+ }
+
// create the SPE worker threads
bool ok = true;
@@ -805,6 +846,8 @@
return;
}
+ decr_njobs_active(ci->ncomplete);
+
if (0){
static int total_jobs;
static int total_msgs;
@@ -902,12 +945,13 @@
else {
for (int i = 0; i < n; i++){
switch(MBOX_MSG_OP(msg[i])){
+#if 0
case OP_JOBS_DONE:
if (debug())
printf("eh: job_done (0x%08x) from spu[%d]\n", msg[i], spe_num);
notify_clients_jobs_are_done(spe_num, MBOX_MSG_ARG(msg[i]));
break;
-
+#endif
case OP_SPU_BUFSIZE:
set_ea_args_maxsize(MBOX_MSG_ARG(msg[i]));
break;
@@ -1050,8 +1094,61 @@
}
////////////////////////////////////////////////////////////////////////
-// This is the top of the SPE worker threads
+void
+gc_job_manager_impl::poll_for_job_completion()
+{
+ static const int niter = 1000;
+ bool handled_something = false;
+
+ for (int n = 0; n < niter; n++){
+
+ for (unsigned int spe_num = 0; spe_num < d_options.nspes; spe_num++){
+ volatile spe_spu_control_area_t *spe_ctrl = d_worker[spe_num].spe_ctrl;
+ int nentries = spe_ctrl->SPU_Mbox_Stat & 0xFF;
+ while (nentries-- > 0){
+ handled_something = true;
+ unsigned int msg = spe_ctrl->SPU_Out_Mbox;
+ switch(MBOX_MSG_OP(msg)){
+ case OP_JOBS_DONE:
+ if (debug())
+ printf("jc: job_done (0x%08x) from spu[%d]\n", msg, spe_num);
+ notify_clients_jobs_are_done(spe_num, MBOX_MSG_ARG(msg));
+ break;
+
+ default:
+ printf("jc: Unexpected msg (0x%08x) from spu[%d]\n", msg, spe_num);
+ break;
+ }
+ }
+ }
+ if (handled_something)
+ return;
+
+ // FIXME stall our thread pipeline to keep from sucking up all cycles
+ }
+}
+
+//
+// This is the "main program" of the job completer thread
+//
+void
+gc_job_manager_impl::job_completer_loop()
+{
+ while (!d_shutdown_requested){
+ {
+ omni_mutex_lock l(d_jc_mutex);
+ while (d_jc_njobs_active <= 0 && !d_shutdown_requested)
+ d_jc_cond.wait();
+ }
+
+ poll_for_job_completion();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// this is the top of the SPE worker threads
+
static void *
start_worker(void *arg)
{
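The PPE-side handshake around the new thread is a standard counted-work wakeup: job submission bumps d_jc_njobs_active and signals only on the 0-to-1 transition, the completer sleeps on the condition variable while the count is zero, and decr_njobs_active() subtracts the batch size the SPU reports back. A stand-alone sketch of that pattern, written with plain pthreads instead of the omni_mutex/omni_condition wrappers (names are illustrative, not from the tree):

    #include <pthread.h>

    struct jc_state {
      pthread_mutex_t mutex;
      pthread_cond_t  cond;
      int             njobs_active;   /* submitted but not yet reaped */
      volatile int    shutdown;
    };

    /* Called on job submission. Signaling only on the 0 -> 1 transition
     * keeps the common case (jobs already in flight) down to one
     * lock/unlock pair.
     */
    static void incr_njobs_active(struct jc_state *s)
    {
      pthread_mutex_lock(&s->mutex);
      if (s->njobs_active++ == 0)
        pthread_cond_signal(&s->cond);
      pthread_mutex_unlock(&s->mutex);
    }

    /* Body of the completer thread: sleep while idle, poll while busy. */
    static void *job_completer_loop(void *arg)
    {
      struct jc_state *s = (struct jc_state *) arg;

      while (!s->shutdown){
        pthread_mutex_lock(&s->mutex);
        while (s->njobs_active <= 0 && !s->shutdown)
          pthread_cond_wait(&s->cond, &s->mutex);
        pthread_mutex_unlock(&s->mutex);

        /* poll_for_job_completion() would go here: spin over the mapped
         * SPU_Mbox_Stat registers and reap OP_JOBS_DONE messages. */
      }
      return 0;
    }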
Modified:
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.h
===================================================================
--- gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.h	2009-01-13 22:05:56 UTC (rev 10214)
+++ gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/gc_job_manager_impl.h	2009-01-14 01:55:06 UTC (rev 10215)
@@ -1,6 +1,6 @@
/* -*- c++ -*- */
/*
- * Copyright 2007,2008 Free Software Foundation, Inc.
+ * Copyright 2007,2008,2009 Free Software Foundation, Inc.
*
* This file is part of GNU Radio
*
@@ -44,14 +44,15 @@
};
struct worker_ctx {
- volatile worker_state state;
- unsigned int spe_idx; // [0, nspes-1]
- spe_context_ptr_t spe_ctx;
- pthread_t thread;
- gc_spu_args_t *spu_args; // pointer to 16-byte aligned struct
+ volatile worker_state state;
+ unsigned int spe_idx; // [0, nspes-1]
+ spe_context_ptr_t spe_ctx;
+ spe_spu_control_area_t *spe_ctrl;
+ pthread_t thread;
+ gc_spu_args_t *spu_args; // pointer to 16-byte aligned struct
worker_ctx()
- : state(WS_FREE), spe_idx(0), spe_ctx(0),
+ : state(WS_FREE), spe_idx(0), spe_ctx(0), spe_ctrl(0),
thread(0), spu_args(0) {}
~worker_ctx();
};
@@ -107,6 +108,11 @@
volatile bool d_shutdown_requested;
spe_event_handler d_spe_event_handler;
+ // used to coordinate communication w/ the job completer thread
+ omni_mutex d_jc_mutex;
+ omni_condition d_jc_cond;
+ pthread_t d_jc_thread; // the job completion thread
+ int d_jc_njobs_active; // # of jobs submitted but not yet reaped
// All of the job descriptors are hung off of here.
// We allocate them all in a single cache aligned chunk.
@@ -150,12 +156,16 @@
public:
void event_handler_loop(); // really private
+ void job_completer_loop(); // really private
private:
bool send_all_spes(uint32_t msg);
bool send_spe(unsigned int spe, uint32_t msg);
void print_event(spe_event_unit_t *evt);
void handle_event(spe_event_unit_t *evt);
+ void incr_njobs_active();
+ void decr_njobs_active(int n);
+ void poll_for_job_completion();
// bitvector ops
void bv_zero(unsigned long *bv);
Modified:
gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/spu/gc_main.c
===================================================================
--- gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/spu/gc_main.c	2009-01-13 22:05:56 UTC (rev 10214)
+++ gnuradio/branches/developers/eb/gcell-wip/gcell/lib/runtime/spu/gc_main.c	2009-01-14 01:55:06 UTC (rev 10215)
@@ -49,6 +49,9 @@
#define ROUND_UP(x, p2) (((x)+((p2)-1)) & ~((p2)-1))
+//#define OUT_MBOX_CHANNEL SPU_WrOutIntrMbox
+#define OUT_MBOX_CHANNEL SPU_WrOutMbox
+
#define USE_LLR_LOST_EVENT 0 // define to 0 or 1
int gc_sys_tag; // tag for misc DMA operations
@@ -101,7 +104,7 @@
if (p->in_use == 0)
return;
- gc_udelay(5);
+ gc_udelay(1);
} while (1);
}
@@ -143,7 +146,7 @@
put_in_progress, ci_idx, comp_info.ncomplete, total_complete);
// send PPE a message
- spu_writech(SPU_WrOutIntrMbox, MK_MBOX_MSG(OP_JOBS_DONE, ci_idx));
+ spu_writech(OUT_MBOX_CHANNEL, MK_MBOX_MSG(OP_JOBS_DONE, ci_idx));
ci_idx ^= 0x1; // switch buffers
comp_info.in_use = 1;
@@ -663,7 +666,7 @@
// If we've got job completion info for the PPE and we can send a
// message without blocking, do it.
- if (comp_info.ncomplete != 0 && spu_readchcnt(SPU_WrOutIntrMbox) != 0){
+ if (comp_info.ncomplete != 0 && spu_readchcnt(OUT_MBOX_CHANNEL) != 0){
gc_log_write0(GCL_SS_SYS, 0x12);
flush_completion_info();
}
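On the SPU side the functional change is the output channel: OP_JOBS_DONE now goes out through SPU_WrOutMbox, which does not interrupt the PPE -- that is why the PPE-side job completer has to poll. The write stays guarded by spu_readchcnt() so a full mailbox never stalls the SPU. A minimal sketch of that guard; the MK_MBOX_MSG/OP_JOBS_DONE definitions shown are illustrative stand-ins, not the real gcell macros:

    #include <spu_mfcio.h>

    /* Illustrative stand-ins for the gcell message macros; the actual
     * layout in the tree may differ. */
    #define OP_JOBS_DONE        0x1
    #define MK_MBOX_MSG(op, a)  (((op) << 24) | ((a) & 0xFFFFFF))

    static void maybe_flush_completion_info(unsigned int ci_idx, int ncomplete)
    {
      /* spu_readchcnt() on a write channel returns the number of free
       * slots; a write into a full SPU_WrOutMbox would block the SPU, so
       * only write when there is room. Using the non-interrupting mailbox
       * skips the PPE interrupt path entirely; the PPE-side job completer
       * polls the mailbox instead. */
      if (ncomplete != 0 && spu_readchcnt(SPU_WrOutMbox) != 0)
        spu_writech(SPU_WrOutMbox, MK_MBOX_MSG(OP_JOBS_DONE, ci_idx));
    }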