curr_set(void);
struct ref*
prev_ref(void);
-struct model
+struct model*
curr_model(void);
void
inc_posts(void);
-#define CURR_CDAT (*cdat_stackp)
-#define CURR_SET set_list[CURR_CDAT->num_sets]
-#define REF_IDX (num_refs % (refs_per_page * pages_per_chunk))
-#define PREV_REF (ref_buf[num_ref_chunks] + (REF_IDX * (sizeof (struct ref)) + pagesize - (sizeof (struct ref))))
-#define CURR_REF (ref_buf[num_ref_chunks] + (REF_IDX * (sizeof (struct ref)) + pagesize))
-#define ODAT_IDX (num_odats % (odats_per_page * pages_per_chunk))
-#define CURR_ODAT (odat_buf[num_odat_chunks] + (ODAT_IDX * (sizeof (struct odat)) + pagesize))
-#define VDAT_IDX (num_vdats % (vdats_per_page * pages_per_chunk))
-#define CURR_VDAT (vdat_buf[num_vdat_chunks] + (VDAT_IDX * (sizeof (struct vdat)) + pagesize))
-#define CURR_MODEL (CURR_VDAT.model_list[CURR_VDAT.num_models])
-#define CURR_LINK (link_buf[num_links])
-#define CURR_POST (post_buf[num_posts])
-#define CURR_QUAD (CURR_ODAT->quad_file)
-
-long pagesize;
-
-int pages_per_chunk = 10;
-
-int num_cdats = 0;
-int curr_max_cdats = PTRS_IN_PAGE;
-
-struct cdat* cdat_buf[PTRS_IN_PAGE];
-struct cdat* cdat_stack[PTRS_IN_PAGE];
-struct cdat** cdat_stackp;
-
-
-int num_odat_chunks = 0;
-int num_odats = 0;
-void* odat_buf[MAX_CHUNKS];
-long odats_per_page;
-
+//NOTE(review): chunk size silently changes here -- the old runtime
+//pages_per_chunk was 10, the new compile-time constant is 16. Confirm the
+//bump is intentional, and that the old pages_per_chunk global (still defined
+//and set to 10 later in this patch) is no longer read anywhere.
+#define PAGES_PER_CHUNK 16
+#define CURR_CDAT (*cdat_stackp)
//"type free" chunk stacking
struct chunk_stack
{ void* chunks[MAX_CHUNKS];
  void* csp; //chunk stack pointer
  void* dsp[MAX_CHUNKS]; //dat stack pointer (per chunk)
-} ocs, vcs, ccs; //odat, vdat, and cdat chunk stacks
+  int max_dats; //num dats in a chunk
+} ocs, vcs, ccs, rcs, lcs, pcs; //odat, vdat, and cdat, ref, link, post stacks
//type safety handled by macro expansion
+//NOTE(review): csp is declared void*, but every macro below treats it as a
+//cursor into chunks[] (an array of void*). With the (void*) casts, the
+//subtractions below yield BYTE differences, not element counts; csp should
+//almost certainly be void** pointing into chunks[] -- TODO confirm intent.
-#define CHUNK_LEN(STACK) ((STACK).csp - (STACK).chunks)
-#define CURRENT_DAT(STACK,TYPE) ((TYPE)(STACK.dsp[CHUNK_LEN(STACK)])
+//NOTE(review): despite its name, CURRENT_CHUNK evaluates to an integer-like
+//pointer difference minus one, not a pointer to the top chunk; CHUNK_FULL
+//then casts that difference to TYPE and compares against max_dats. This
+//cannot be right -- it should compare the dat cursor against the start of
+//chunks[CHUNKS_LEN-1] plus max_dats. Verify before merging.
+#define CURRENT_CHUNK(STACK) ( (void*) (STACK).csp - (void*) (STACK).chunks - 1)
+#define CHUNKS_LEN(STACK) ( (void*) (STACK).csp - (void*) (STACK).chunks)
+#define CHUNK_FULL(STACK, TYPE) ( (CURRENT_DAT(STACK,TYPE) - (TYPE) CURRENT_CHUNK(STACK)) \
+                                  >= (STACK).max_dats )
+//NOTE(review): three problems here: (1) ++ on a void* is invalid ISO C (GNU
+//extension); (2) the malloc result is assigned to csp itself rather than
+//stored through it (*++csp = ...), so the new chunk pointer never lands in
+//chunks[] and the stack structure is lost; (3) malloc is unchecked and the
+//dsp[] slot for the new chunk is never initialized before CURRENT_DAT reads
+//it. All three need fixing together with the csp type above.
+#define CSP_PUSH(STACK) (++(STACK).csp = malloc(pagesize * PAGES_PER_CHUNK))
+//NOTE(review): dsp is indexed by CHUNKS_LEN, which is a byte offset per the
+//note above, and is one past the top chunk's slot -- off-by-one suspect.
+#define CURRENT_DAT(STACK,TYPE) ((TYPE) (STACK).dsp[CHUNKS_LEN(STACK)])
+#define PREVIOUS_DAT(STACK,TYPE) ((TYPE) (STACK).dsp[CHUNKS_LEN(STACK)]-1)
+//NOTE(review): ++ applied to a cast expression ((TYPE)...) is not an lvalue;
+//this will not compile as ISO C. The increment must be applied to the dsp[]
+//element itself (e.g. (STACK).dsp[...] = (TYPE)(STACK).dsp[...] + 1).
#define INCREMENT_DAT(STACK,TYPE) (++CURRENT_DAT(STACK,TYPE))
//Stack-specific macros
#define CURRENT_ODAT() (CURRENT_DAT(ocs,struct odat*))
#define CURRENT_VDAT() (CURRENT_DAT(vcs,struct vdat*))
#define CURRENT_CDAT() (CURRENT_DAT(ccs,struct cdat*))
+#define CURRENT_LINK() (CURRENT_DAT(lcs,struct link*))
+#define CURRENT_POST() (CURRENT_DAT(pcs,struct ref*))
+#define CURRENT_REF() (CURRENT_DAT(rcs,struct ref*))
+#define PREVIOUS_REF() (PREVIOUS_DAT(rcs, struct ref*))
//Metadata
+#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
-int num_vdat_chunks = 0;
-int num_vdats = 0;
-struct vdat* vdat_buf[MAX_CHUNKS];
-long vdats_per_page;
+//CURR_QUAD: quad file handle of the odat currently under construction.
+//Fixed: the old CURR_ODAT macro is deleted by this patch, so the previous
+//expansion (CURR_ODAT->quad_file) no longer resolved; route through the new
+//chunk-stack accessor instead.
+#define CURR_QUAD (CURRENT_ODAT()->quad_file)
+
+long pagesize;
+
+//NOTE(review): superseded by PAGES_PER_CHUNK (16) for the chunk allocators;
+//kept only if something else still reads it -- confirm and remove if dead.
+int pages_per_chunk = 10;
+
+int num_cdats = 0;
+int curr_max_cdats = PTRS_IN_PAGE;
+
+struct cdat* cdat_buf[PTRS_IN_PAGE];
+struct cdat* cdat_stack[PTRS_IN_PAGE];
+struct cdat** cdat_stackp;
+
+int num_odats = 0;
+
+int num_vdats = 0;
-int num_ref_chunks = 0;
int num_refs = 0;
-void* ref_buf[MAX_CHUNKS];
-long refs_per_page;
uint64_t ss_ref_id = 0x00FFFFFF; /* system space for ref_ids */
int num_posts = -1;
*cdat_stackp++ = cdat_buf[num_cdats++];
  pagesize = sysconf(_SC_PAGESIZE);
+  //NOTE(review): the removed *_per_page math was inverted anyway
+  //(sizeof(struct)/pagesize truncates to 0), but nothing in the new code
+  //initializes the max_dats field of ocs/vcs/ccs/rcs/lcs/pcs -- CHUNK_FULL
+  //will compare against garbage. Set each here, e.g.
+  //ocs.max_dats = (pagesize * PAGES_PER_CHUNK) / sizeof(struct odat);
-  odats_per_page = (sizeof (struct odat)/pagesize);
-  vdats_per_page = (sizeof (struct vdat)/pagesize);
-  refs_per_page = (sizeof (struct ref)/pagesize);
  return 0;
for(i = 0; i <= num_odats ; i++)
    {
-      free(odat_buf[i]);
+      //NOTE(review): the free() calls are removed but nothing releases the
+      //chunks malloc'd by CSP_PUSH -- every loop below is now an empty body,
+      //i.e. a leak. These should instead walk each chunk_stack's chunks[]
+      //(0..CHUNKS_LEN-1) and free them. Also note num_odats is no longer
+      //incremented anywhere after this patch, so this bound is always 0.
    }
  for(i = 0; i <= num_cdats; i++)
    {
-      free(cdat_buf[i]);
    }
  for(i = 0; i <= num_vdats; i++)
    {
-      free(vdat_buf[i]);
    }
  for(i = 0; i <= num_refs; i++)
    {
-      free(ref_buf[i]);
    }
  for(i = 0; i<= num_links; i++)
    {
-      free(link_buf[i]);
+    }
+  for(i = 0; i<= num_posts; i++)
+    {
    }
}
alloc_odat
()
{
+  //NOTE(review): num_odats++ is dropped here, but num_odats still bounds the
+  //cleanup loop and the sibling allocators (vdat/link/ref) kept their
+  //counters -- inconsistent; either keep the counter or drop it everywhere.
-  num_odats++;
+  //advance the odat cursor, or start a fresh chunk when the current one is
+  //full; see chunk_stack macro notes -- dsp[] is not reset after CSP_PUSH,
+  //so the returned pointer after a push reads an uninitialized slot.
+  if(CHUNK_FULL(ocs, struct odat*))
+    CSP_PUSH(ocs);
+  else
+    INCREMENT_DAT(ocs, struct odat*);
-  if(!(num_odats % (odats_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_odat_chunks++;
-      if( ((odat_buf[num_odat_chunks] = malloc(odats_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc odat chunk failed");
-    }
-
-  return CURR_ODAT;
+  return CURRENT_ODAT();
}
void
{
+  //NOTE(review): unlike alloc_odat (which drops num_odats++ in this patch),
+  //the vdat counter is kept -- pick one convention for all allocators.
  num_vdats++;
-  if(!(num_vdats % (vdats_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_vdat_chunks++;
-      if( ((vdat_buf[num_vdat_chunks] = malloc(vdats_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc vdat chunk failed");
-    }
+  //advance vdat cursor or push a fresh chunk when full
+  if(CHUNK_FULL(vcs, struct vdat*))
+    CSP_PUSH(vcs);
+  else
+    INCREMENT_DAT(vcs, struct vdat*);
}
{
  num_links++;
-  if(num_links >= curr_max_links)
-    { if( (realloc((void*) link_buf, PTRS_IN_PAGE * 4)) == NULL)
-	perror("realloc vdat_buf failed");
-      curr_max_links += PTRS_IN_PAGE;
-    }
-  if((CURR_LINK = (struct link*) malloc(sizeof (struct link))) == NULL)
-    perror("malloc link failed");
+  //advance link cursor or push a fresh chunk when full (replaces the old
+  //per-link malloc + realloc'd pointer table)
+  if(CHUNK_FULL(lcs, struct link*))
+    CSP_PUSH(lcs);
+  else
+    INCREMENT_DAT(lcs, struct link*);
-  return CURR_LINK;
+  return CURRENT_LINK();
}
struct ref*
()
{
  num_refs++;
+  //advance the ref cursor or push a fresh chunk when full.
+  //Fixed: the stack is rcs (refs), so the element type must be struct ref*.
+  //The previous revision of this hunk passed struct link* (copy/paste from
+  //alloc_link), which would size CHUNK_FULL's capacity check and the cursor
+  //stride by the wrong struct and corrupt the ref chunk.
+  if(CHUNK_FULL(rcs, struct ref*))
+    CSP_PUSH(rcs);
+  else
+    INCREMENT_DAT(rcs, struct ref*);
  if(num_refs % 16 == 0)
-    { CURR_POST = CURR_REF;
+    { //NOTE(review): CURRENT_POST() expands to a cast expression, which is
+      //not an assignable lvalue in ISO C -- this line needs the same lvalue
+      //rework as INCREMENT_DAT. Every 16th ref is recorded as a post.
+      CURRENT_POST() = CURRENT_REF();
      inc_posts();
    }
-  if(!(num_refs % (refs_per_page * pages_per_chunk))) //chunk is full
-    {
-      num_ref_chunks++;
-      if( ((ref_buf[num_ref_chunks] = malloc(refs_per_page * pages_per_chunk)) == NULL) )
-	perror("malloc ref chunk failed");
-    }
-  return CURR_REF;
+  return CURRENT_REF();
}
void
inc_posts()
{
-  if(num_posts >= curr_max_posts)
-    { if( (realloc((void*) ref_buf, PTRS_IN_PAGE * 4)) == NULL)
-	perror("realoc post_buf failed");
-      curr_max_posts += PTRS_IN_PAGE;
-    }
-  if ((CURR_POST = (struct ref*) malloc (sizeof (struct ref))) == NULL)
-    perror("malloc post failed");
+  //bump the post count (starts at -1, so the first call yields index 0) and
+  //advance the post chunk stack
+  num_posts++;
+
+  if(CHUNK_FULL(pcs, struct ref*))
+    {CSP_PUSH(pcs);} //NOTE(review): braces here but not in the sibling
+                     //allocators -- harmless, but normalize the style
+  else
+    INCREMENT_DAT(pcs, struct ref*);
+
}
curr_odat
()
{
-  return CURR_ODAT;
+  //accessor: pointer to the odat on top of the odat chunk stack
+  return CURRENT_ODAT();
}
struct vdat*
curr_vdat
()
{
-  return CURR_VDAT;
+  //accessor: pointer to the vdat on top of the vdat chunk stack
+  return CURRENT_VDAT();
}
struct set*
curr_set
()
{
-  return &CURR_CDAT->CURR_SET;
+  //accessor: address of the in-progress set slot inside the current cdat
+  return &CURRENT_SET();
}
struct ref*
prev_ref
()
{
-  return PREV_REF;
+  //accessor: the ref allocated immediately before the current one.
+  //NOTE(review): PREVIOUS_DAT is plain pointer arithmetic within one chunk;
+  //it does not handle the chunk boundary -- confirm callers never cross it.
+  return PREVIOUS_REF();
}
-struct model
+struct model*
curr_model
()
{
+  //accessor: address of the in-progress model slot inside the current vdat.
+  //NOTE(review): the return type changes from struct model (by value) to a
+  //pointer in this patch -- make sure every caller of curr_model() is
+  //updated in the same change.
-  return CURR_MODEL;
+  return &CURRENT_MODEL();
}