Remove TODO for overflow check (#842)

The overflow check was already done; it has been simplified.
Reformat to get a consistent style throughout the functions.
Matthieu Darbois 2016-09-15 23:51:34 +02:00 committed by GitHub
parent 9a07ccb3d0
commit 6e7616c83c
1 changed file with 119 additions and 105 deletions
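
For reference, the row-splitting arithmetic that the old TODO worried about can be sketched in isolation. The snippet below is a minimal standalone illustration of the scheme the patch settles on, not OpenJPEG code: the helper name split_rows, the plain unsigned types and the printf output are assumptions made only for this example. Each job gets step_j = rh / num_jobs rows, and clamping just the last job to rh absorbs the division remainder, which is why the separate max_j > rh test could be dropped.

#include <stdio.h>

/* Illustrative sketch: split rh rows across at most num_threads jobs,
 * mirroring the min_j/max_j computation in the patched code. The caller
 * is expected to guarantee rh >= 1, as the real code does. */
static void split_rows(unsigned rh, unsigned num_threads)
{
    unsigned num_jobs = num_threads;
    unsigned step_j, j;

    if (rh < num_jobs) {
        num_jobs = rh;            /* never create more jobs than rows */
    }
    step_j = rh / num_jobs;

    for (j = 0; j < num_jobs; j++) {
        unsigned min_j = j * step_j;
        unsigned max_j = (j + 1U) * step_j;
        if (j == num_jobs - 1U) { /* last job is clamped, so the remainder is covered */
            max_j = rh;
        }
        printf("job %u: rows [%u, %u)\n", j, min_j, max_j);
    }
}

int main(void)
{
    split_rows(10, 4);            /* 4 jobs over 10 rows: 2, 2, 2 and 4 rows */
    return 0;
}

With rh = 10 and 4 threads the sketch prints the ranges [0, 2), [2, 4), [4, 6) and [6, 10); the last job picks up the rows that the integer division leaves over. The same pattern is applied twice in the diff below, once for the horizontal pass (rh) and once for the vertical pass (rw).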


@@ -625,9 +625,10 @@ static void opj_dwt_decode_v_func(void* user_data, opj_tls_t* tls)
 /* <summary> */
 /* Inverse wavelet transform in 2-D. */
 /* </summary> */
-static OPJ_BOOL opj_dwt_decode_tile(opj_thread_pool_t* tp, opj_tcd_tilecomp_t* tilec, OPJ_UINT32 numres, DWT1DFN dwt_1D) {
+static OPJ_BOOL opj_dwt_decode_tile(opj_thread_pool_t* tp, opj_tcd_tilecomp_t* tilec, OPJ_UINT32 numres, DWT1DFN dwt_1D)
+{
     opj_dwt_t h;
     opj_dwt_t v;
@@ -673,120 +674,133 @@ static OPJ_BOOL opj_dwt_decode_tile(opj_thread_pool_t* tp, opj_tcd_tilecomp_t* t
         h.dn = (OPJ_INT32)(rw - (OPJ_UINT32)h.sn);
         h.cas = tr->x0 % 2;
-        if( num_threads <= 1 || rh == 1 )
+        if( num_threads <= 1 || rh <= 1 )
         {
             for(j = 0; j < rh; ++j) {
                 opj_dwt_interleave_h(&h, &tiledp[j*w]);
                 (dwt_1D)(&h);
                 memcpy(&tiledp[j*w], h.mem, rw * sizeof(OPJ_INT32));
             }
         }
         else
         {
             OPJ_UINT32 num_jobs = (OPJ_UINT32)num_threads;
-            if( rh < num_jobs ) {
-                num_jobs = rh;
-            }
-            for( j = 0; j < num_jobs; j++ )
-            {
-                opj_dwd_decode_h_job_t* job;
-                job = (opj_dwd_decode_h_job_t*) opj_malloc(sizeof(opj_dwd_decode_h_job_t));
-                if( !job )
-                {
-                    /* It would be nice to fallback to single thread case, but */
-                    /* unfortunately some jobs may be launched and have modified */
-                    /* tiledp, so it is not practical to recover from that error */
-                    /* FIXME event manager error callback */
-                    opj_thread_pool_wait_completion(tp, 0);
-                    opj_aligned_free(h.mem);
-                    return OPJ_FALSE;
-                }
-                job->h = h;
-                job->dwt_1D = dwt_1D;
-                job->rw = rw;
-                job->w = w;
-                job->tiledp = tiledp;
-                job->min_j = j * (rh / num_jobs);
-                job->max_j = (j+1) * (rh / num_jobs); /* TODO this can overflow */
-                if( job->max_j > rh || j == num_jobs - 1 )
-                    job->max_j = rh;
-                job->h.mem = (OPJ_INT32*)opj_aligned_malloc(h_mem_size);
-                if (!job->h.mem)
-                {
-                    /* FIXME event manager error callback */
-                    opj_thread_pool_wait_completion(tp, 0);
-                    opj_free(job);
-                    opj_aligned_free(h.mem);
-                    return OPJ_FALSE;
-                }
-                opj_thread_pool_submit_job( tp, opj_dwt_decode_h_func, job );
-            }
-            opj_thread_pool_wait_completion(tp, 0);
-        }
+            OPJ_UINT32 step_j;
+
+            if( rh < num_jobs ) {
+                num_jobs = rh;
+            }
+            step_j = (rh / num_jobs);
+
+            for(j = 0; j < num_jobs; j++)
+            {
+                opj_dwd_decode_h_job_t* job;
+
+                job = (opj_dwd_decode_h_job_t*) opj_malloc(sizeof(opj_dwd_decode_h_job_t));
+                if( !job )
+                {
+                    /* It would be nice to fallback to single thread case, but */
+                    /* unfortunately some jobs may be launched and have modified */
+                    /* tiledp, so it is not practical to recover from that error */
+                    /* FIXME event manager error callback */
+                    opj_thread_pool_wait_completion(tp, 0);
+                    opj_aligned_free(h.mem);
+                    return OPJ_FALSE;
+                }
+                job->h = h;
+                job->dwt_1D = dwt_1D;
+                job->rw = rw;
+                job->w = w;
+                job->tiledp = tiledp;
+                job->min_j = j * step_j;
+                job->max_j = (j + 1U) * step_j; /* this can overflow */
+                if( j == (num_jobs - 1U) ) { /* this will take care of the overflow */
+                    job->max_j = rh;
+                }
+                job->h.mem = (OPJ_INT32*)opj_aligned_malloc(h_mem_size);
+                if (!job->h.mem)
+                {
+                    /* FIXME event manager error callback */
+                    opj_thread_pool_wait_completion(tp, 0);
+                    opj_free(job);
+                    opj_aligned_free(h.mem);
+                    return OPJ_FALSE;
+                }
+                opj_thread_pool_submit_job( tp, opj_dwt_decode_h_func, job );
+            }
+            opj_thread_pool_wait_completion(tp, 0);
+        }
         v.dn = (OPJ_INT32)(rh - (OPJ_UINT32)v.sn);
         v.cas = tr->y0 % 2;
-        if( num_threads <= 1 || rw == 1 )
+        if( num_threads <= 1 || rw <= 1 )
         {
             for(j = 0; j < rw; ++j){
                 OPJ_UINT32 k;
-                opj_dwt_interleave_v(&v, &tiledp[j], (OPJ_INT32)w);
-                (dwt_1D)(&v);
-                for(k = 0; k < rh; ++k) {
-                    tiledp[k * w + j] = v.mem[k];
-                }
-            }
-        }
-        else
-        {
-            OPJ_UINT32 num_jobs = (OPJ_UINT32)num_threads;
-            if( rw < num_jobs )
-                num_jobs = rw;
-            for( j = 0; j < num_jobs; j++ )
-            {
-                opj_dwd_decode_v_job_t* job;
-                job = (opj_dwd_decode_v_job_t*) opj_malloc(sizeof(opj_dwd_decode_v_job_t));
-                if( !job )
-                {
-                    /* It would be nice to fallback to single thread case, but */
-                    /* unfortunately some jobs may be launched and have modified */
-                    /* tiledp, so it is not practical to recover from that error */
-                    /* FIXME event manager error callback */
-                    opj_thread_pool_wait_completion(tp, 0);
-                    opj_aligned_free(v.mem);
-                    return OPJ_FALSE;
-                }
-                job->v = v;
-                job->dwt_1D = dwt_1D;
-                job->rh = rh;
-                job->w = w;
-                job->tiledp = tiledp;
-                job->min_j = j * (rw / num_jobs);
-                job->max_j = (j+1) * (rw / num_jobs); /* TODO this can overflow */
-                if( job->max_j > rw || j == num_jobs - 1 )
-                    job->max_j = rw;
-                job->v.mem = (OPJ_INT32*)opj_aligned_malloc(h_mem_size);
-                if (!job->v.mem)
-                {
-                    /* FIXME event manager error callback */
-                    opj_thread_pool_wait_completion(tp, 0);
-                    opj_free(job);
-                    opj_aligned_free(v.mem);
-                    return OPJ_FALSE;
-                }
-                opj_thread_pool_submit_job( tp, opj_dwt_decode_v_func, job );
-            }
-            opj_thread_pool_wait_completion(tp, 0);
-        }
+
+                opj_dwt_interleave_v(&v, &tiledp[j], (OPJ_INT32)w);
+                (dwt_1D)(&v);
+                for(k = 0; k < rh; ++k) {
+                    tiledp[k * w + j] = v.mem[k];
+                }
+            }
+        }
+        else
+        {
+            OPJ_UINT32 num_jobs = (OPJ_UINT32)num_threads;
+            OPJ_UINT32 step_j;
+
+            if( rw < num_jobs ) {
+                num_jobs = rw;
+            }
+            step_j = (rw / num_jobs);
+
+            for( j = 0; j < num_jobs; j++ )
+            {
+                opj_dwd_decode_v_job_t* job;
+
+                job = (opj_dwd_decode_v_job_t*) opj_malloc(sizeof(opj_dwd_decode_v_job_t));
+                if( !job )
+                {
+                    /* It would be nice to fallback to single thread case, but */
+                    /* unfortunately some jobs may be launched and have modified */
+                    /* tiledp, so it is not practical to recover from that error */
+                    /* FIXME event manager error callback */
+                    opj_thread_pool_wait_completion(tp, 0);
+                    opj_aligned_free(v.mem);
+                    return OPJ_FALSE;
+                }
+                job->v = v;
+                job->dwt_1D = dwt_1D;
+                job->rh = rh;
+                job->w = w;
+                job->tiledp = tiledp;
+                job->min_j = j * step_j;
+                job->max_j = (j + 1U) * step_j; /* this can overflow */
+                if( j == (num_jobs - 1U) ) { /* this will take care of the overflow */
+                    job->max_j = rw;
+                }
+                job->v.mem = (OPJ_INT32*)opj_aligned_malloc(h_mem_size);
+                if (!job->v.mem)
+                {
+                    /* FIXME event manager error callback */
+                    opj_thread_pool_wait_completion(tp, 0);
+                    opj_free(job);
+                    opj_aligned_free(v.mem);
+                    return OPJ_FALSE;
+                }
+                opj_thread_pool_submit_job( tp, opj_dwt_decode_v_func, job );
+            }
+            opj_thread_pool_wait_completion(tp, 0);
+        }
     }
     opj_aligned_free(h.mem);
     return OPJ_TRUE;
 }
-static void opj_v4dwt_interleave_h(opj_v4dwt_t* OPJ_RESTRICT w, OPJ_FLOAT32* OPJ_RESTRICT a, OPJ_INT32 x, OPJ_INT32 size){
+static void opj_v4dwt_interleave_h(opj_v4dwt_t* OPJ_RESTRICT w, OPJ_FLOAT32* OPJ_RESTRICT a, OPJ_INT32 x, OPJ_INT32 size)
+{
     OPJ_FLOAT32* OPJ_RESTRICT bi = (OPJ_FLOAT32*) (w->wavelet + w->cas);
     OPJ_INT32 count = w->sn;
     OPJ_INT32 i, k;