GitHub Repository: att/ast
Path: blob/master/src/lib/libast/vmalloc/vmprivate.c
/***********************************************************************
* *
* This software is part of the ast package *
* Copyright (c) 1985-2012 AT&T Intellectual Property *
* and is licensed under the *
* Eclipse Public License, Version 1.0 *
* by AT&T Intellectual Property *
* *
* A copy of the License is available at *
* http://www.eclipse.org/org/documents/epl-v10.html *
* (with md5 checksum b35adb5213ca9657e911e9befb180842) *
* *
* Information and Software Systems Research *
* AT&T Research *
* Florham Park NJ *
* *
* Glenn Fowler <[email protected]> *
* David Korn <[email protected]> *
* Phong Vo <[email protected]> *
* *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmprivate(){}

#else

#include "vmhdr.h"

static char* Version = "\n@(#)$Id: Vmalloc (AT&T Labs - Research) 2011-08-08 $\0\n";


/* Private code used in the vmalloc library
**
** Written by Kiem-Phong Vo, [email protected], 01/16/94.
*/
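
/* This file provides the region-growing and region-shrinking primitives
** shared by the vmalloc allocation methods: _vmextend() obtains more
** memory for a region, either by growing the current segment in place or
** by acquiring a new segment through the discipline memory function;
** _vmtruncate() returns memory to the discipline; and _vmlock() supplies
** the global and per-region locking used elsewhere in the library.  The
** _Vmextern table at the end makes these names visible to the rest of
** the library.
*/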

/* Get more memory for a region */
#if __STD_C
static Block_t* _vmextend(reg Vmalloc_t* vm, size_t size, Vmsearch_f searchf )
#else
static Block_t* _vmextend(vm, size, searchf )
reg Vmalloc_t*  vm;       /* region to increase in size */
size_t          size;     /* desired amount of space    */
Vmsearch_f      searchf;  /* tree search function       */
#endif
{
    reg size_t      s;
    reg Seg_t*      seg;
    reg Block_t     *bp, *tp, *np;
    reg Vmuchar_t*  addr = (Vmuchar_t*)Version; /* shut compiler warning */
    reg Vmdata_t*   vd = vm->data;

    GETPAGESIZE(_Vmpagesize);

    if(vd->incr <= 0) /* this is just _Vmheap on the first call */
        vd->incr = _Vmpagesize*sizeof(Void_t*);

    /* Get slightly more for administrative data */
    s = size + sizeof(Seg_t) + sizeof(Block_t) + sizeof(Head_t) + 2*ALIGN;
    if(s <= size)   /* size was too large and we have wrapped around */
        return NIL(Block_t*);
    if((size = ROUND(s,vd->incr)) < s)
        return NIL(Block_t*);
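
    /* Illustrative arithmetic (the numbers are made up; the real values of
    ** sizeof(Seg_t), sizeof(Head_t) and ALIGN are platform-dependent):
    ** with vd->incr == 65536 and a request of size == 100000, s becomes
    ** 100000 plus a little over a hundred bytes of overhead, and rounding
    ** up with ROUND(s,65536) yields size == 131072, i.e. two increments.
    */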

    /* increase the rounding factor to reduce # of future extensions */
    if(size > 2*vd->incr && vm->disc->round < vd->incr)
        vd->incr *= 2;

    if(!(seg = vd->seg) ) /* there is no current segment */
        addr = NIL(Vmuchar_t*);
    else /* see if we can extend the current segment */
    {   addr = (Vmuchar_t*)(*vm->disc->memoryf)(vm,seg->addr,seg->extent,
                                                seg->extent+size,vm->disc);
        if(addr == (Vmuchar_t*)seg->addr)
            addr += seg->extent; /* seg successfully extended */
        else    seg = NIL(Seg_t*); /* a new segment was created */
    }
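
    /* The memoryf call above asks the discipline to grow the existing
    ** mapping at seg->addr in place from seg->extent to seg->extent+size
    ** bytes.  Getting the same address back means the extension succeeded
    ** and the new space starts right after the old extent; any other
    ** non-NULL address is treated below as a brand new segment.
    */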

    if(!addr) /* create a new segment */
    {   if(!(addr = (Vmuchar_t*)(*vm->disc->memoryf)(vm, NIL(Void_t*), 0, size, vm->disc)) )
        {   if(vm->disc->exceptf) /* announce that no more memory is available */
            {
                CLRLOCK(vm, 0);
                (void)(*vm->disc->exceptf)(vm, VM_NOMEM, (Void_t*)size, vm->disc);
                SETLOCK(vm, 0);
            }
            return NIL(Block_t*);
        }
    }
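
    /* Note that the region lock is dropped (CLRLOCK) while the VM_NOMEM
    ** exception handler runs and re-acquired (SETLOCK) afterwards,
    ** presumably so the handler is not invoked while holding the lock.
    */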

    if(seg)
    {   /* extending current segment */
        bp = BLOCK(seg->baddr);

        if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
        {   /**/ ASSERT((SIZE(bp)&~BITS) == 0);
            /**/ ASSERT(SEG(bp) == seg);

            if(!ISPFREE(SIZE(bp)) )
                SIZE(bp) = size - sizeof(Head_t);
            else
            {   /**/ ASSERT(searchf);
                bp = LAST(bp);
                if(bp == vd->wild)
                    vd->wild = NIL(Block_t*);
                else    REMOVE(vd,bp,INDEX(SIZE(bp)),tp,(*searchf));
                SIZE(bp) += size;
            }
        }
        else
        {   if(seg->free)
            {   bp = seg->free;
                seg->free = NIL(Block_t*);
                SIZE(bp) += size;
            }
            else
            {   SEG(bp) = seg;
                SIZE(bp) = size - sizeof(Head_t);
            }
        }

        seg->size += size;
        seg->extent += size;
        seg->baddr += size;
    }
    else
    {   /* creating a new segment */
        reg Seg_t   *sp, *lastsp;

        if((s = (size_t)(VLONG(addr)%ALIGN)) != 0)
            addr += ALIGN-s;

        seg = (Seg_t*)addr;
        seg->vmdt = vd;
        seg->addr = (Void_t*)(addr - (s ? ALIGN-s : 0));
        seg->extent = size;
        seg->baddr = addr + size - (s ? 2*ALIGN : 0);
        seg->free = NIL(Block_t*);
        bp = SEGBLOCK(seg);
        SEG(bp) = seg;
        SIZE(bp) = seg->baddr - (Vmuchar_t*)bp - 2*sizeof(Head_t);

        /* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment list
           is kept in decreasing address order. This is so that we can easily
           check for the wild block.
        */
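        /* For these methods the loop below is an insertion into a list kept
        ** in decreasing address order: it stops at the first segment whose
        ** address lies below the new one and links the new segment in front
        ** of it, so vd->seg names the highest-addressed segment, which
        ** makes the wild-block check further down cheap.
        */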
        lastsp = NIL(Seg_t*);
        sp = vd->seg;
        if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE))
            for(; sp; lastsp = sp, sp = sp->next)
                if(seg->addr > sp->addr)
                    break;
        seg->next = sp;
        if(lastsp)
            lastsp->next = seg;
        else    vd->seg = seg;

        seg->size = SIZE(bp);
    }

    /* make a fake header for possible segmented memory */
    tp = NEXT(bp);
    SEG(tp) = seg;
    SIZE(tp) = BUSY;

    /* see if the wild block is still wild */
    if((tp = vd->wild) && (seg = SEG(tp)) != vd->seg)
    {   np = NEXT(tp);
        CLRPFREE(SIZE(np));
        if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
        {   SIZE(tp) |= BUSY|JUNK;
            LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
            CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
        }
        else    seg->free = tp;

        vd->wild = NIL(Block_t*);
    }

    return bp;
}
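
/* On success _vmextend() returns the block covering the newly acquired
** space (merged with an adjoining free block or with seg->free when one
** was available).  That block has already been taken off the wild pointer
** or the free tree here, so the caller can link it into its own free
** data structures.
*/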

/* Truncate a segment if possible */
#if __STD_C
static ssize_t _vmtruncate(Vmalloc_t* vm, Seg_t* seg, size_t size, int exact)
#else
static ssize_t _vmtruncate(vm, seg, size, exact)
Vmalloc_t*  vm;     /* containing region        */
Seg_t*      seg;    /* the one to be truncated  */
size_t      size;   /* amount of free space     */
int         exact;
#endif
{
    reg Void_t*     caddr;
    reg Seg_t*      last;
    reg Vmdata_t*   vd = vm->data;
    reg Vmemory_f   memoryf = vm->disc->memoryf;

    caddr = seg->addr;

    if(size < seg->size)
    {   reg ssize_t less;

        if(exact)
            less = size;
        else /* round the truncated amount to discipline requirements */
        {   if((less = vm->disc->round) <= 0)
                less = _Vmpagesize;
            less = (size/less)*less;
            less = (less/vd->incr)*vd->incr;
            if(less > 0 && (ssize_t)size > less && (size-less) < sizeof(Block_t) )
                less = less <= (ssize_t)vd->incr ? 0 : less - vd->incr;
        }
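
        /* Illustrative example (numbers are made up): with disc->round
        ** unset so that less starts at a 4096-byte _Vmpagesize, and with
        ** vd->incr == 65536, a free tail of size == 150000 gives
        ** less = (150000/4096)*4096 = 147456, then
        ** less = (147456/65536)*65536 = 131072; since size-less = 18928
        ** is well above sizeof(Block_t), 131072 bytes are handed back to
        ** the discipline below and the rest stays with the segment.
        */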

        if(less <= 0 ||
           (*memoryf)(vm,caddr,seg->extent,seg->extent-less,vm->disc) != caddr)
            return 0;

        seg->extent -= less;
        seg->size -= less;
        seg->baddr -= less;
        SEG(BLOCK(seg->baddr)) = seg;
        SIZE(BLOCK(seg->baddr)) = BUSY;

        return less;
    }
    else
    {   /* unlink segment from region */
        if(seg == vd->seg)
        {   vd->seg = seg->next;
            last = NIL(Seg_t*);
        }
        else
        {   for(last = vd->seg; last->next != seg; last = last->next)
                ;
            last->next = seg->next;
        }

        /* now delete it */
        if((*memoryf)(vm,caddr,seg->extent,0,vm->disc) == caddr)
            return size;

        /* space reduction failed, reinsert segment */
        if(last)
        {   seg->next = last->next;
            last->next = seg;
        }
        else
        {   seg->next = vd->seg;
            vd->seg = seg;
        }
        return 0;
    }
}
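
/* Region locking: with a NULL region the global _Vmlock is taken or
** released; a region opened with VM_SHARE uses asolock() on its own lock
** word so that concurrent users synchronize through an atomic spin lock;
** any other region just sets or clears the lock flag, which is presumably
** sufficient because such a region is not accessed concurrently.
*/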

int _vmlock(Vmalloc_t* vm, int locking)
{
    if(!vm) /* some sort of global locking */
    {   if(!locking) /* turn off lock */
            asolock(&_Vmlock, 1, ASO_UNLOCK);
        else    asolock(&_Vmlock, 1, ASO_SPINLOCK);
    }
    else if(vm->data->mode&VM_SHARE)
    {   if(!locking) /* turning off the lock */
            asolock(&vm->data->lock, 1, ASO_UNLOCK);
        else    asolock(&vm->data->lock, 1, ASO_SPINLOCK);
    }
    else
    {   if(!locking)
            vm->data->lock = 0;
        else    vm->data->lock = 1;
    }
    return 0;
}


/* Externally visible names but local to library */
Vmextern_t _Vmextern =
{   _vmextend,      /* _Vmextend    */
    _vmtruncate,    /* _Vmtruncate  */
    0,              /* _Vmpagesize  */
    NIL(char*(*)_ARG_((char*,const char*,int))),    /* _Vmstrcpy */
    NIL(char*(*)_ARG_((Vmulong_t,int))),            /* _Vmitoa   */
    NIL(void(*)_ARG_((Vmalloc_t*,
                      Vmuchar_t*,Vmuchar_t*,size_t,size_t))), /* _Vmtrace */
    NIL(void(*)_ARG_((Vmalloc_t*)))                 /* _Vmpfclose */
};

#endif