alloc_cdat(void);
struct odat*
alloc_odat(void);
-void
+struct vdat*
alloc_vdat(void);
struct link*
alloc_link(void);
curr_odat(void);
struct vdat*
curr_vdat(void);
+struct map*
+curr_map(void);
struct set*
curr_set(void);
struct ref*
-prev_ref(void);
+curr_ref(void);
struct model*
curr_model(void);
void
#define PAGES_PER_CHUNK 16
-#define CURR_CDAT (*cdat_stackp)
//"type free" chunk stacking
struct chunk_stack
{ void* chunks[MAX_CHUNKS];
- void* csp; //chunk stack pointer
+ void* *csp; //chunk stack pointer
void* dsp[MAX_CHUNKS]; //dat stack pointer (per chunk)
- int max_dats; //num dats in a chunk
-} ocs, vcs, ccs, rcs, lcs, pcs; //odat, vdat, and cdat, ref, link, post stacks
-
-//type safety handled by macro expansion
-#define CURRENT_CHUNK(STACK) ( (void*) (STACK).csp - (void*) (STACK).chunks - 1)
-#define CHUNKS_LEN(STACK) ( (void*) (STACK).csp - (void*) (STACK).chunks)
-#define CHUNK_FULL(STACK, TYPE) ( (CURRENT_DAT(STACK,TYPE) - (TYPE) CURRENT_CHUNK(STACK)) \
- >= (STACK).max_dats )
-#define CSP_PUSH(STACK) (++(STACK).csp = malloc(pagesize * PAGES_PER_CHUNK))
-#define CURRENT_DAT(STACK,TYPE) ((TYPE) (STACK).dsp[CHUNKS_LEN(STACK)])
-#define PREVIOUS_DAT(STACK,TYPE) ((TYPE) (STACK).dsp[CHUNKS_LEN(STACK)]-1)
-#define INCREMENT_DAT(STACK,TYPE) (++CURRENT_DAT(STACK,TYPE))
-//Stack-specific macros
-#define CURRENT_ODAT() (CURRENT_DAT(ocs,struct odat*))
-#define CURRENT_VDAT() (CURRENT_DAT(vcs,struct vdat*))
-#define CURRENT_CDAT() (CURRENT_DAT(ccs,struct cdat*))
-#define CURRENT_LINK() (CURRENT_DAT(lcs,struct link*))
-#define CURRENT_POST() (CURRENT_DAT(pcs,struct ref*))
-#define CURRENT_REF() (CURRENT_DAT(rcs,struct ref*))
-#define PREVIOUS_REF() (PREVIOUS_DAT(rcs, struct ref*))
+ int chunk_size; //size of a chunk (including its forfeited page)
+ int max_dats; //number of dats per chunk for this stack
+} ocs, vcs, ccs, rcs, lcs, pcs, mcs; //odat, vdat, cdat,map, ref, link, post stacks
+
+//type safety handled by macro expansion (do not call these directly from code, make dependent macros for access to these)
+#define CHUNKS_LEN(STACK) ((STACK).csp - (STACK).chunks)
+#define CURRENT_CHUNK(STACK) ((STACK).chunks[CHUNKS_LEN(STACK) - 1])
+// NOTE(review): csp advances one pointer per CSP_PUSH, so the bound here is
+// presumably (STACK).chunks + MAX_CHUNKS; multiplying by chunk_size compares
+// against an address far past the chunks[] array -- confirm intent.
+#define CHUNKS_FULL(STACK) ( (STACK).csp >= \
+				   (STACK).chunks + MAX_CHUNKS * (STACK).chunk_size)
+#define CURRENT_DSP(STACK,TYPE) ((TYPE*) ((STACK).dsp[CHUNKS_LEN(STACK) - 1]))
+#define DATA_FULL(STACK,TYPE) ((void*) CURRENT_DSP(STACK,TYPE) >= \
+				(CURRENT_CHUNK(STACK) + (STACK).chunk_size))
+// NOTE(review): csp is a file-scope pointer (zero-initialized, i.e. NULL);
+// pre-incrementing it on the very first push relies on it having been aimed
+// at the chunks array during init, but INIT_STACK below never sets it --
+// verify. The malloc result is also unchecked, and arithmetic on void*
+// (here and in DATA_FULL) is a GCC extension, not standard C.
+#define CSP_PUSH(STACK) (*(++(STACK).csp) = malloc((STACK).chunk_size))
+#define CURRENT_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 1])
+#define PREVIOUS_DATP(STACK,TYPE) (((TYPE**)(STACK).dsp)[CHUNKS_LEN(STACK) - 2])
+#define ALLOC_DAT(STACK,TYPE) (++CURRENT_DATP(STACK,TYPE))
+// NOTE(review): dsp[i] += pagesize bumps pointers that have never been aimed
+// at a chunk (file-scope, so NULL) -- presumably meant dsp[i] = chunks[i] +
+// pagesize once chunk i exists (the "forfeited page" mentioned in the struct
+// comment?). TODO confirm before relying on any dsp[] slot past index 0.
+#define INIT_STACK(STACK,TYPE) \
+  { int i; \
+    (STACK).chunk_size = PAGES_PER_CHUNK * pagesize; \
+    (STACK).max_dats = (STACK).chunk_size / sizeof (TYPE); \
+    CSP_PUSH(STACK); \
+    for( i = 0; i < MAX_CHUNKS; i++){ \
+      (STACK).dsp[i] += pagesize; \
+    } \
+  }
+//Stack-specific macros (called directly from code -- safety enforcement)
+#define INIT_ODAT() (INIT_STACK(ocs, struct odat))
+#define CURRENT_ODAT() (CURRENT_DATP(ocs,struct odat))
+#define ODAT_FULL() (DATA_FULL(ocs,struct odat))
+#define ODAT_ALLOC() (ALLOC_DAT(ocs,struct odat))
+#define OCS_FULL() (CHUNKS_FULL(ocs))
+#define INIT_VDAT() (INIT_STACK(vcs, struct vdat))
+#define CURRENT_VDAT() (CURRENT_DATP(vcs,struct vdat))
+#define VDAT_FULL() (DATA_FULL(vcs,struct vdat))
+#define VDAT_ALLOC() (ALLOC_DAT(vcs,struct vdat))
+#define VCS_FULL() (CHUNKS_FULL(vcs))
+#define INIT_CDAT() (INIT_STACK(ccs, struct cdat))
+#define CURRENT_CDAT() (CURRENT_DATP(ccs,struct cdat))
+#define CDAT_FULL() (DATA_FULL(ccs, struct cdat))
+#define CDAT_ALLOC() (ALLOC_DAT(ccs, struct cdat))
+#define CCS_FULL() (CHUNKS_FULL(ccs))
+#define INIT_MAP() (INIT_STACK(mcs, struct map))
+#define CURRENT_MAP() (CURRENT_DATP(mcs, struct map))
+#define MAP_FULL() (DATA_FULL(mcs, struct map))
+#define MAP_ALLOC() (ALLOC_DAT(mcs, struct map))
+#define MCS_FULL() (CHUNKS_FULL(mcs))
+#define INIT_LINK() (INIT_STACK(lcs, struct link))
+#define CURRENT_LINK() (CURRENT_DATP(lcs,struct link))
+#define LDAT_FULL() (DATA_FULL(lcs, struct link))
+#define LDAT_ALLOC() (ALLOC_DAT(lcs, struct link))
+#define LCS_FULL() (CHUNKS_FULL(lcs))
+//FIX: INIT_POST must initialize pcs (the post stack). It previously ran
+//INIT_STACK(rcs, ...) a second time, leaving pcs' chunk_size/chunks/dsp all
+//zero even though CURRENT_POST/POST_FULL/POST_ALLOC/PCS_FULL all read pcs.
+#define INIT_POST() (INIT_STACK(pcs, struct ref))
+#define CURRENT_POST() (CURRENT_DATP(pcs,struct ref))
+#define POST_FULL() (DATA_FULL(pcs,struct ref))
+#define POST_ALLOC() (ALLOC_DAT(pcs,struct ref))
+#define PCS_FULL() (CHUNKS_FULL(pcs))
+#define INIT_REF() (INIT_STACK(rcs, struct ref))
+#define CURRENT_REF() (CURRENT_DATP(rcs,struct ref))
+#define PREVIOUS_REF() (PREVIOUS_DATP(rcs, struct ref))
+#define REF_FULL() (DATA_FULL(rcs,struct ref))
+#define REF_ALLOC() (ALLOC_DAT(rcs,struct ref))
+#define RCS_FULL() (CHUNKS_FULL(rcs))
//Metadata
-#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
-#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
+#define CURRENT_SET() (CURRENT_CDAT()->set_list[CURRENT_CDAT()->num_sets])
+//#define CURRENT_QUAD() (CURRENT_MAP()->quad_list[CURRENT_MAP()->num_quads])
+//#define CURRENT_MODEL() (CURRENT_VDAT()->model_list[CURRENT_VDAT()->num_models])
-#define CURR_QUAD (CURR_ODAT->quad_file)
long pagesize;
-int pages_per_chunk = 10;
-
int num_cdats = 0;
-int curr_max_cdats = PTRS_IN_PAGE;
-struct cdat* cdat_buf[PTRS_IN_PAGE];
-struct cdat* cdat_stack[PTRS_IN_PAGE];
+struct cdat* cdat_stack[MAX_CLASSES];
struct cdat** cdat_stackp;
int num_odats = 0;
int num_vdats = 0;
-int num_refs = 0;
-uint64_t ss_ref_id = 0x00FFFFFF; /* system space for ref_ids */
+int num_maps = 0;
-int num_posts = -1;
-int curr_max_posts = PTRS_IN_PAGE;
-struct ref* post_buf[PTRS_IN_PAGE];
+int num_refs = 0;
+int ss_ref_id = 0x0FFFFFFF; /* system space for ref_ids */
+int num_posts = 0;
-int num_links = -1;
-int curr_max_links = PTRS_IN_PAGE;
-struct link* link_buf[PTRS_IN_PAGE];
+int num_links = 0;
/* The initalization function of the IR. */
ir_init()
{
-  /* Init root cdat and stack */
  char root[4] = "root";
-  if( (cdat_buf[num_cdats] = (struct cdat*) malloc(sizeof(struct cdat))) == NULL)
-  {
-    perror("malloc root class failed\n");
-    return -1;
-  }
-  cdat_buf[num_cdats]->idx = num_cdats;
-  memmove(cdat_buf[num_cdats]->name, root, 4);
+  /* pagesize must be set before any INIT_* runs: chunk sizes derive from it. */
+  pagesize = sysconf(_SC_PAGESIZE);
-  cdat_stackp = cdat_stack;
-  *cdat_stackp++ = cdat_buf[num_cdats++];
+  INIT_CDAT();
+  /* FIX: cdat_stackp is a file-scope pointer (NULL until assigned); the old
+     code aimed it at cdat_stack before use but the rewrite dropped that line,
+     so the store below dereferenced NULL. */
+  cdat_stackp = cdat_stack;
+  *cdat_stackp = CURRENT_CDAT();
+
+  /* FIX: root[] holds exactly 4 bytes and no NUL; copying 32 read far past
+     the array. Copy what exists and terminate the name explicitly
+     (name is at least 32 bytes -- the old code copied 32 into it). */
+  memmove((*cdat_stackp)->name, root, sizeof(root));
+  (*cdat_stackp)->name[sizeof(root)] = '\0';
+
+  INIT_ODAT();
+  INIT_VDAT();
+  INIT_MAP();
+  INIT_LINK();
+  INIT_REF();
+  INIT_POST();
-  pagesize = sysconf(_SC_PAGESIZE);
  return 0;
{
  int i;
-  for(i = 0; i <= num_odats ; i++)
+  /* Release every chunk each stack actually pushed (CHUNKS_LEN = chunks in use). */
+  for(i = 0; i < CHUNKS_LEN(ccs) ; i++)
  {
+    free(ccs.chunks[i]);
  }
-  for(i = 0; i <= num_cdats; i++)
+  for(i = 0; i < CHUNKS_LEN(ocs); i++)
  {
+    free(ocs.chunks[i]);
  }
-  for(i = 0; i <= num_vdats; i++)
+  for(i = 0; i < CHUNKS_LEN(vcs) ; i++)
  {
+    free(vcs.chunks[i]);
  }
-  for(i = 0; i <= num_refs; i++)
+  for(i = 0; i < CHUNKS_LEN(rcs); i++)
  {
+    free(rcs.chunks[i]);
  }
-  for(i = 0; i<= num_links; i++)
+  for(i = 0; i < CHUNKS_LEN(lcs); i++)
  {
+    free(lcs.chunks[i]);
  }
-  for(i = 0; i<= num_posts; i++)
+  for(i = 0; i < CHUNKS_LEN(pcs); i++)
  {
+    free(pcs.chunks[i]);
  }
+  /* FIX: the map stack is initialized in ir_init too; its chunks were leaked. */
+  for(i = 0; i < CHUNKS_LEN(mcs); i++)
+  {
+    free(mcs.chunks[i]);
+  }
}
-//TODO: FREE MEMORY!
struct cdat*
alloc_cdat()
{
  num_cdats++;
-  if(curr_max_cdats <= num_cdats)
-  { if( (realloc((void*) cdat_buf, PTRS_IN_PAGE * 4)) == NULL)
-      perror("realloc cdat_buf failed");
-    curr_max_cdats += PTRS_IN_PAGE;
-    if( (realloc( (void*) cdat_stack, PTRS_IN_PAGE * 4)) == NULL) //increase cdat_stack also
-      perror("realloc cdat_stack failed");
+  /* Claim the next cdat slot, opening a fresh chunk when the current one is
+     exhausted; dies once every chunk slot has been used. */
+  if(CDAT_FULL())
+  { if(CCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) cdats\n", num_cdats);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(ccs);
+      /* NOTE(review): after CSP_PUSH the new chunk's dat pointer (dsp) is not
+         reset here -- confirm the INIT/PUSH macros handle that. */
  }
-  if( (CURR_CDAT = (struct cdat*) malloc(sizeof (struct cdat)) ) == NULL )
-    perror("malloc cdat failed");
-
-  return CURR_CDAT;
+  else
+    CDAT_ALLOC();
+  return CURRENT_CDAT();
}
+
+//these should probably be inline
struct odat*
alloc_odat
()
{
-  if(CHUNK_FULL(ocs, struct odat*))
-    CSP_PUSH(ocs);
+  num_odats++;
+  /* FIX: the guard was inverted (!OCS_FULL) -- it aborted when the chunk
+     stack still had room and pushed when it was full. Match alloc_cdat:
+     abort only when no chunk slot remains. */
+  if(ODAT_FULL())
+  { if(OCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) odats\n", num_odats);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(ocs);
+  }
  else
-    INCREMENT_DAT(ocs, struct odat*);
+    ODAT_ALLOC();
  return CURRENT_ODAT();
}
-void
+struct vdat*
alloc_vdat
()
-{
-  num_vdats++;
+{ num_vdats++;
+  /* FIX: inverted guard (!VCS_FULL) aborted while room remained and pushed
+     into a full stack; abort only when the chunk stack is exhausted. */
+  if(VDAT_FULL())
+  { if(VCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) vdats\n", num_vdats);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(vcs);
+  }
+  else
+    VDAT_ALLOC();
-  if(CHUNK_FULL(vcs, struct vdat*))
-    CSP_PUSH(vcs);
+  return CURRENT_VDAT();
+}
+
+struct map*
+alloc_map
+()
+{ num_maps++;
+  /* FIX: inverted guard (!MCS_FULL) -- abort only when no chunk slot is left. */
+  if(MAP_FULL())
+  { if(MCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) maps\n", num_maps);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(mcs);
+  }
  else
-    INCREMENT_DAT(vcs, struct vdat*);
+    MAP_ALLOC();
+  return CURRENT_MAP();
}
+
struct link*
alloc_link
()
-{
-  num_links++;
-
-  if(CHUNK_FULL(lcs, struct link*))
-    CSP_PUSH(lcs);
+{ num_links++;
+  /* FIX: inverted guard (!LCS_FULL) -- abort only when no chunk slot is left. */
+  if(LDAT_FULL())
+  { if(LCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) links\n", num_links);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(lcs);
+  }
  else
-    INCREMENT_DAT(lcs, struct link*);
+    LDAT_ALLOC();
  return CURRENT_LINK();
+
}
struct ref*
alloc_ref
()
-{
-  num_refs++;
-  if(CHUNK_FULL(rcs, struct link*))
-    CSP_PUSH(rcs);
+{ num_refs++;
+  if(REF_FULL())
+// NOTE(review): this guard looks inverted relative to alloc_cdat -- it aborts
+// while the chunk stack still has room (!RCS_FULL) and pushes into a full
+// stack; presumably should be if(RCS_FULL()) { abort } else CSP_PUSH(rcs).
+// The function body is also truncated in this hunk (no closing braces
+// visible) -- fix at the full definition.
+  { if(!RCS_FULL())
+    { fprintf(stderr, "You have allocated to many (%d) refs ", num_refs);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(rcs);
+  }
  else
-    INCREMENT_DAT(rcs, struct link*);
+    REF_ALLOC();
+
+// Every 16th ref is recorded as a "post" checkpoint into the post stack.
  if(num_refs % 16 == 0)
  { CURRENT_POST() = CURRENT_REF();
void
inc_posts()
-{
-  num_posts++;
-
-  if(CHUNK_FULL(pcs, struct ref*))
-    {CSP_PUSH(pcs);}
+{ num_posts++;
+  /* FIX: inverted guard (!PCS_FULL) -- abort only when no chunk slot is left.
+     Also report "posts", not "refs", since num_posts is what overflowed. */
+  if(POST_FULL())
+  { if(PCS_FULL())
+    { fprintf(stderr, "You have allocated too many (%d) posts\n", num_posts);
+      exit(EXIT_FAILURE);
+    }
+    else
+      CSP_PUSH(pcs);
+  }
  else
-    INCREMENT_DAT(pcs, struct ref*);
-
+    POST_ALLOC();
}
curr_cdat
()
{
-  return CURR_CDAT;
+  // Top of the class stack = the cdat currently being built.
+  return (*cdat_stackp);
}
struct odat*
return &CURRENT_SET();
}
struct ref*
-prev_ref
+curr_ref
()
{
-  return PREVIOUS_REF();
+  // Most recently allocated ref on the ref chunk stack.
+  return CURRENT_REF();
}
-struct model*
-curr_model
+struct map*
+curr_map
()
{
-  return &CURRENT_MODEL();
+  // Most recently allocated map on the map chunk stack.
+  return CURRENT_MAP();
}
+/* struct quad* */
+/* curr_quad */
+/* () */
+/* { */
+/* return &CURRENT_QUAD(); */
+/* } */
+/* struct model* */
+/* curr_model */
+/* () */
+/* { */
+/* return &CURRENT_MODEL(); */
+/* } */