Note: This website is archived. For up-to-date information about D projects and development, please visit wiki.dlang.org.

root/trunk/alloc.d

Revision 287, 71.0 kB (checked in by dsimcha, 14 years ago)

These should be static to work around bugs in GDC.

Line 
1 /**Stuff having to do with memory management.  Mostly TempAlloc and some data
2  * structure implementations that go with it.
3  *
4  * Author:  David Simcha*/
5  /*
6  * License:
7  * Boost Software License - Version 1.0 - August 17th, 2003
8  *
9  * Permission is hereby granted, free of charge, to any person or organization
10  * obtaining a copy of the software and accompanying documentation covered by
11  * this license (the "Software") to use, reproduce, display, distribute,
12  * execute, and transmit the Software, and to prepare derivative works of the
13  * Software, and to permit third-parties to whom the Software is furnished to
14  * do so, all subject to the following:
15  *
16  * The copyright notices in the Software and this entire statement, including
17  * the above license grant, this restriction and the following disclaimer,
18  * must be included in all copies of the Software, in whole or in part, and
19  * all derivative works of the Software, unless such copies or derivative
20  * works are solely in the form of machine-executable object code generated by
21  * a source language processor.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
26  * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
27  * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
28  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31
32 module dstats.alloc;
33
import std.traits, core.memory, std.array, std.range, core.exception,
    std.functional, std.math, std.algorithm : max;

import core.stdc.stdio;
import core.stdc.string;  // memcpy, used by alignedRealloc().

static import core.stdc.stdlib;

import dstats.base;
42
// When compiled with -unittest, pull in the extra modules the tests below
// need and supply an empty main() so this module can be test-run standalone.
version(unittest) {
    import std.stdio, std.conv, std.random, dstats.sort;
    void main() {}
}
47
// This is just for convenience/code readability/saving typing.
enum ptrSize = (void*).sizeof;

// This was accidentally assumed in a few places and I'm too lazy to fix it
// until I see proof that it needs to be fixed.
// (alignedMalloc/alignedFree store a bool flag at a 1-byte offset, so the
// assumption is load-bearing there.)
static assert(bool.sizeof == 1);
54
/**Compile-time membership test:  evaluates to true iff T is exactly one of
 * the types in the Types list.*/
template IsType(T, Types...) {
    // Original idea by Burton Radons, modified.  Recurses down the type
    // list, stopping early when a match is found.
    static if (Types.length == 0) {
        const bool IsType = false;
    } else {
        const bool IsType = is(T == Types[0]) || IsType!(T, Types[1 .. $]);
    }
}
62
// Extracts the element type of an array type:  ArrayType1!(T[]) is T.
// The T[] pattern also matches static arrays (used that way by
// isReferenceType below).
template ArrayType1(T: T[]) {
    alias T ArrayType1;
}
66
/**Compile-time test:  true iff any of Types could contain a pointer into
 * GC-managed memory — i.e. it is not a plain numeric/character type, and
 * (for structs and static arrays) none of its fields/elements are either.
 * Used by blockAttribute to decide whether allocations need GC scanning.*/
template isReferenceType(Types...) {  //Thanks to Bearophile.
    static if (Types.length == 0) {
        const bool isReferenceType = false;
    } else static if (Types.length == 1) {
        // Plain value types (includes the old imaginary/complex types).
        static if (IsType!(Unqual!(Types[0]), bool, byte, ubyte, short, ushort,
                           int, uint, long, ulong, float, double, real, ifloat,
                           idouble, ireal, cfloat, cdouble, creal, char, dchar,
                           wchar) ) {
            const bool isReferenceType = false;
        } else static if ( is(Types[0] == struct) ) {
            // A struct is a reference type iff any of its fields is.
            const bool isReferenceType =
            isReferenceType!(FieldTypeTuple!(Types[0]));
        } else static if (isStaticArray!(Types[0])) {
            // A static array is a reference type iff its element type is.
            const bool isReferenceType = isReferenceType!(ArrayType1!(Types[0]));
        } else
            // Everything else (pointers, classes, dynamic arrays, ...).
            const bool isReferenceType = true;
    } else
        // Multiple types:  true if any one of them is a reference type.
        const bool isReferenceType = isReferenceType!(Types[0]) |
        isReferenceType!(Types[1 .. $]);
} // end isReferenceType!()
87
// Sanity checks for isReferenceType:  chars and pointer-free structs are
// value types; pointers and structs containing pointers are not.
unittest {
    static assert(!isReferenceType!(typeof("Foo"[0])));
    static assert(isReferenceType!(uint*));
    static assert(!isReferenceType!(int[3]));
    struct noPtrs {
        uint f;
        uint b;
    }
    struct ptrs {
        uint* f;
        uint b;
    }
    static assert(!isReferenceType!(noPtrs));
    static assert(isReferenceType!(ptrs));
}
103
/**GC block attribute to use when allocating an array of T:  NO_SCAN when
 * T cannot contain pointers, zero (scanned) otherwise.*/
template blockAttribute(T) {
    static if (!isReferenceType!(T)) {
        enum blockAttribute = GC.BlkAttr.NO_SCAN;
    } else {
        enum blockAttribute = 0;
    }
}
109
///Returns a new array of type T w/o initializing elements.
T[] newVoid(T)(size_t length) {
    auto mem = GC.malloc(T.sizeof * length, blockAttribute!(T));
    return (cast(T*) mem)[0 .. length];
}
115
/**Convenience overload of lengthVoid() taking an int length; forwards to
 * the size_t overload.
 *
 * Fixed:  a negative newLength used to be silently cast to size_t,
 * requesting an absurdly huge allocation.  Reject it up front.*/
void lengthVoid(T)(ref T[] input, int newLength) {
    assert(newLength >= 0, "lengthVoid:  negative length");
    input.lengthVoid(cast(size_t) newLength);
}
119
///Lengthens an array w/o initializing new elements.
void lengthVoid(T)(ref T[] input, size_t newLength) {
    immutable newBytes = T.sizeof * newLength;

    // Reuse the existing block when shrinking, or when the GC block that
    // backs the array is already large enough — no realloc needed.
    if (newLength <= input.length || GC.sizeOf(input.ptr) >= newBytes) {
        input = input.ptr[0 .. newLength];
        return;
    }

    auto p = cast(T*) GC.realloc(input.ptr, newBytes, blockAttribute!(T));
    input = p[0 .. newLength];
}
131
// True iff a value of type U can be appended to a T[] with the ~= operator.
private template Appends(T, U) {
    enum bool Appends = AppendsImpl!(T, U).ret;
}

// Implementation helper for Appends:  declares dummies and probes a ~= b
// with typeof.
private template AppendsImpl(T, U) {
    T[] a;
    U b;
    enum bool ret = is(typeof(a ~= b));
}
141
///Appends to an array, deleting the old array if it has to be realloced.
// NOTE(review):  `delete` on GC memory is deprecated in modern D; this code
// predates that.  Confirm the target compiler still accepts it.
void appendDelOld(T, U)(ref T[] to, U from)
if(Appends!(T, U)) {
    auto old = to;
    to ~= from;
    // If ~= moved the data to a new block, eagerly free the old one.
    // Callers must not retain other slices of the old array.
    if (old.ptr !is to.ptr && old.ptr !is null) delete old;
}
149
// appendDelOld must preserve element order across any reallocations.
unittest {
    uint[] foo;
    foo.appendDelOld(5);
    foo.appendDelOld(4);
    foo.appendDelOld(3);
    foo.appendDelOld(2);
    foo.appendDelOld(1);
    assert(foo == cast(uint[]) [5,4,3,2,1]);
}
159
// Memory allocation routines.  These wrap malloc(), free() and realloc(),
// and guarantee alignment.
private enum size_t alignBytes = 16;


// Throws to signal allocation failure from the C-heap wrappers below.
// Static to work around bugs in GDC (see revision note).
// NOTE(review):  verify the druntime in use has an OutOfMemoryError ctor
// accepting a message string; some versions only take (file, line).
static void outOfMemory()  {
    throw new OutOfMemoryError("Out of memory in TempAlloc.");
}
168
/* Allocates size bytes from the C heap, aligned on an alignBytes boundary.
 * Layout of the raw malloc()ed block, relative to the returned pointer ret:
 *
 *   [alignment padding][flag byte][base pointer][user data at ret ...]
 *
 * The base pointer (the raw malloc() result, needed by alignedFree) sits in
 * the ptrSize bytes immediately before ret; the shouldAddRange flag is the
 * single byte before that.
 */
private void* alignedMalloc(size_t size, bool shouldAddRange = false) {
    // We need (alignBytes - 1) extra bytes to guarantee alignment, 1 byte
    // to store the shouldAddRange flag, and ptrSize bytes to store
    // the pointer to the beginning of the block.
    void* toFree = core.stdc.stdlib.malloc(
        alignBytes + ptrSize + size
    );

    if(toFree is null) outOfMemory();

    // Add the offset for the flag and the base pointer.
    auto intPtr = cast(size_t) toFree + ptrSize + 1;

    // Align it.  (Relies on alignBytes being a power of two.)
    intPtr = (intPtr + alignBytes - 1) & (~(alignBytes - 1));
    auto ret = cast(void**) intPtr;

    // Store base pointer.
    (cast(void**) ret)[-1] = toFree;

    // Store flag.  (Byte offset relies on bool.sizeof == 1, asserted above.)
    (cast(bool*) ret)[-1 - ptrSize] = shouldAddRange;

    // If the caller will store GC-visible pointers in this block, register
    // it so the GC scans it.
    if(shouldAddRange) {
        GC.addRange(ret, size);
    }

    return ret;
}
198
// Frees a block allocated by alignedMalloc(), first undoing the
// GC.addRange() registration if the allocation requested one.
private void alignedFree(void* ptr) {
    // If it was allocated with alignedMalloc() then the pointer to the
    // beginning is at ptr[-1], and the addRange flag one byte before that.
    auto addedRange = (cast(bool*) ptr)[-1 - ptrSize];

    if(addedRange) {
        GC.removeRange(ptr);
    }

    core.stdc.stdlib.free( (cast(void**) ptr)[-1]);
}
210
/* Reallocates an alignedMalloc()ed block to newLen bytes, preserving its
 * contents and its GC.addRange() registration.  oldLen must be the size of
 * the original allocation.
 *
 * Fixed:  the old code always copied oldLen bytes, overrunning the new
 * buffer whenever newLen < oldLen.  Copy only min(oldLen, newLen) bytes.
 */
private void* alignedRealloc(void* ptr, size_t newLen, size_t oldLen) {
    // Carry the shouldAddRange flag over to the new block.
    auto storedRange = (cast(bool*) ptr)[-1 - ptrSize];
    auto newPtr = alignedMalloc(newLen, storedRange);

    memcpy(newPtr, ptr, (oldLen < newLen) ? oldLen : newLen);

    alignedFree(ptr);
    return newPtr;
}
219
/**A struct to allocate memory in a strictly first-in last-out order for
 * things like scratch space.  Technically, memory can safely escape the
 * scope in which it was allocated.  However, this is a very bad idea
 * unless being done within the private API of a class, struct or nested
 * function, where it can be guaranteed that LIFO will not be violated.
 *
 * Under the hood, this works by allocating large blocks (currently 4 MB)
 * from the GC, and sub-allocating these as a stack.  Very large allocations
 * (currently > 4MB) are simply performed on the heap.  There are two ways to
 * free memory:  Calling TempAlloc.free() frees the last allocated block.
 * Calling TempAlloc.frameFree() frees all memory allocated since the last
 * call to TempAlloc.frameInit().
 *
 * All allocations are aligned on 16-byte boundaries using padding, since on x86,
 * 16-byte alignment is necessary to make SSE2 work.  Note, however, that this
 * is implemented based on the assumption that the GC allocates using 16-byte
 * alignment (which appears to be true in druntime.)
 */
struct TempAlloc {
private:
    static struct Stack(T) {  // Simple, fast stack w/o error checking.
        private size_t capacity;
        private size_t index;
        private T* data;
        private enum sz = T.sizeof;

        // Local max() so this works without importing std.algorithm here.
        private static size_t max(size_t lhs, size_t rhs) pure {
            return (rhs > lhs) ? rhs : lhs;
        }

        void push(T elem) {
            // Grow geometrically; C-heap backed so it never moves GC data.
            if (capacity == index) {
                capacity = max(16, capacity * 2);
                data = cast(T*) core.stdc.stdlib.realloc(data, capacity * sz);
            }
            data[index++] = elem;
        }

        T pop() {
            index--;
            auto ret = data[index];
            data[index] = T.init;  // Prevent false ptrs.
            return ret;
        }

        void destroy() {
            if(data) {
                core.stdc.stdlib.free(data);
                data = null;
            }
        }
    }

    // Bookkeeping for a block not currently being allocated from:  how many
    // bytes were used and where its space starts.
    struct Block {
        size_t used = 0;
        void* space = null;
    }

    // Per-thread allocator state.  A class (not a struct) so callers can
    // cache a reference to it and avoid repeated TLS lookups.
    final class State {
        size_t used;            // Bytes used in the current block.
        void* space;            // Start of the current block.
        size_t totalAllocs;     // Number of live allocations (lastAlloc entries).
        void*[] lastAlloc;      // Start pointer of every live allocation, LIFO.
        uint nblocks;           // Blocks in use (including current).
        uint nfree;             // Blocks sitting on the freelist.
        size_t frameIndex;      // totalAllocs value at the last frameInit().

        // inUse holds info for all blocks except the one currently being
        // allocated from.  freelist holds space ptrs for all free blocks.
        Stack!(Block) inUse;
        Stack!(void*) freelist;

        void putLast(void* last) {
            // Add an element to lastAlloc, checking length first.
            if (totalAllocs == lastAlloc.length)
                doubleSize(lastAlloc);
            lastAlloc[totalAllocs] = cast(void*) last;
            totalAllocs++;
        }

        void destroy() {
            // Release the current block, the bookkeeping array, every
            // in-use block and every free block back to the C heap.
            if(space) {
                alignedFree(space);
                space = null;
            }

            if(lastAlloc) {
                core.stdc.stdlib.free(lastAlloc.ptr);
                lastAlloc = null;
            }

            while(inUse.index > 0) {
                auto toFree = inUse.pop();
                alignedFree(toFree.space);
            }

            while(freelist.index > 0) {
                auto toFree = freelist.pop();
                alignedFree(toFree);
            }

            inUse.destroy();
            freelist.destroy();
        }

        ~this() {
            destroy();
        }
    }

    // Thread shutdown:  tear down this thread's allocator state.
    static ~this() {
        if(state) {
            state.destroy();
            state = null;
        }
    }

    enum size_t alignBytes = 16U;
    enum size_t blockSize = 4U * 1024U * 1024U;
    // Initial capacity of lastAlloc:  one slot per minimum-size (16-byte)
    // allocation a whole block can hold.
    enum size_t nBookKeep = blockSize / alignBytes * ptrSize;
    static State state;

    // Doubles the capacity of the C-heap-backed lastAlloc array.
    static void doubleSize(ref void*[] lastAlloc) {
        size_t newSize = lastAlloc.length * 2;
        void** ptr = cast(void**) core.stdc.stdlib.realloc(
            lastAlloc.ptr, newSize * ptrSize);
        lastAlloc = ptr[0..newSize];
    }

    // Lazily creates this thread's State on first use.
    static State stateInit() {
        State stateCopy;
        try { stateCopy = new State; } catch { outOfMemory(); }

        with(stateCopy) {
            space = alignedMalloc(blockSize);

            // We don't need 16-byte alignment for the bookkeeping array.
            lastAlloc = (cast(void**) core.stdc.stdlib.malloc(nBookKeep))
                        [0..nBookKeep / ptrSize];
            nblocks++;
        }

        state = stateCopy;
        return stateCopy;
    }

    // Rounds nbytes up to the next multiple of alignBytes.
    static size_t getAligned(size_t nbytes) pure {
        // Only works if alignBytes is a power of two, but I think that's
        // a pretty safe assumption.
        return (nbytes + (alignBytes - 1)) & (~(alignBytes - 1));
    }

public:
    /**Allows caller to cache the state class on the stack and pass it in as a
     * parameter.  This is ugly, but results in a speed boost that can be
     * significant in some cases because it avoids a thread-local storage
     * lookup.  Also used internally.*/
    static State getState() {
        State stateCopy = state;
        return (stateCopy is null) ? stateInit : stateCopy;
    }

    /**Initializes a frame, i.e. marks the current allocation position.
     * Memory past the position at which this was last called will be
     * freed when frameFree() is called.  Returns a reference to the
     * State class in case the caller wants to cache it for speed.*/
    static State frameInit() {
        return frameInit(getState);
    }

    /**Same as frameInit() but uses stateCopy cached on stack by caller
     * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static State frameInit(State stateCopy) {
        with(stateCopy) {
            // Save the previous frame marker as a pseudo-allocation so
            // frameFree() can restore it.
            putLast( cast(void*) frameIndex );
            frameIndex = totalAllocs;
        }
        return stateCopy;
    }

    /**Frees all memory allocated by TempAlloc since the last call to
     * frameInit().*/
    static void frameFree() {
        frameFree(getState);
    }

    /**Same as frameFree() but uses stateCopy cached on stack by caller
    * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void frameFree(State stateCopy) {
        with(stateCopy) {
            while (totalAllocs > frameIndex) {
                free(stateCopy);
            }
            // Pop the saved previous frame marker pushed by frameInit().
            frameIndex = cast(size_t) lastAlloc[--totalAllocs];
        }
    }

    /**Purely a convenience overload, forwards arguments to TempAlloc.malloc().*/
    static void* opCall(T...)(T args) {
        return TempAlloc.malloc(args);
    }

    /**Allocates nbytes bytes on the TempAlloc stack.  NOT safe for real-time
     * programming, since if there's not enough space on the current block,
     * a new one will automatically be created.  Also, very large objects
     * (currently over 4MB) will simply be heap-allocated.
     *
     * Bugs:  Memory allocated by TempAlloc is not scanned by the GC.
     * This is necessary for performance and to avoid false pointer issues.
     * Do not store the only reference to a GC-allocated object in
     * TempAlloc-allocated memory.*/
    static void* malloc(size_t nbytes) {
        return malloc(nbytes, getState);
    }

    /**Same as malloc() but uses stateCopy cached on stack by caller
    * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void* malloc(size_t nbytes, State stateCopy) {
        nbytes = getAligned(nbytes);
        with(stateCopy) {
            void* ret;
            if (blockSize - used >= nbytes) {
                // Fits in the current block:  bump-allocate.
                ret = space + used;
                used += nbytes;
            } else if (nbytes > blockSize) {
                // Too big for any block:  plain heap allocation.  free()
                // recognizes these by their address being outside the
                // current block.
                ret = alignedMalloc(nbytes);
            } else if (nfree > 0) {
                // Current block full:  recycle a block off the freelist.
                inUse.push(Block(used, space));
                space = freelist.pop;
                used = nbytes;
                nfree--;
                nblocks++;
                ret = space;
            } else { // Allocate more space.
                inUse.push(Block(used, space));
                space = alignedMalloc(blockSize);
                nblocks++;
                used = nbytes;
                ret = space;
            }
            putLast(ret);
            return ret;
        }
    }

    /**Frees the last piece of memory allocated by TempAlloc.  Since
     * all memory must be allocated and freed in strict LIFO order,
     * there's no need to pass a pointer in.  All bookkeeping for figuring
     * out what to free is done internally.*/
    static void free() {
        free(getState);
    }

    /**Same as free() but uses stateCopy cached on stack by caller
    * to avoid a thread-local storage lookup.  Strictly a speed hack.*/
    static void free(State stateCopy) {
        with(stateCopy) {
            void* lastPos = lastAlloc[--totalAllocs];

            // Handle large blocks.
            if (lastPos > space + blockSize || lastPos < space) {
                alignedFree(lastPos);
                return;
            }

            // In-block allocation:  freeing is just rewinding `used`.
            used = (cast(size_t) lastPos) - (cast(size_t) space);
            if (nblocks > 1 && used == 0) {
                // Block fully empty:  put it on the freelist and resume
                // allocating from the previous block.
                freelist.push(space);
                Block newHead = inUse.pop;
                space = newHead.space;
                used = newHead.used;
                nblocks--;
                nfree++;

                // Trim the freelist when it gets much bigger than the
                // working set, returning memory to the C heap.
                if (nfree >= nblocks * 2) {
                    foreach(i; 0..nfree / 2) {
                        alignedFree(freelist.pop);
                        nfree--;
                    }
                }
            }
        }
    }

    /**Returns how many bytes are available in the current frame.*/
    static size_t slack() @property {
        return blockSize - getState().used;
    }

}
510
/**Allocates an array of type T and size size using TempAlloc.
 * Note that appending to this array using the ~= operator,
 * or enlarging it using the .length property, will result in
 * undefined behavior.  This is because, if the array is located
 * at the beginning of a TempAlloc block, the GC will think the
 * capacity is as large as a TempAlloc block, and will overwrite
 * adjacent TempAlloc-allocated data, instead of reallocating it.
 *
 * Bugs: Do not store the only reference to a GC-allocated reference object
 * in an array allocated by newStack because this memory is not
 * scanned by the GC.*/
T[] newStack(T)(size_t size, TempAlloc.State state = null) {
    auto st = (state is null) ? TempAlloc.getState() : state;

    immutable nbytes = T.sizeof * size;
    auto basePtr = cast(T*) TempAlloc.malloc(nbytes, st);
    return basePtr[0 .. size];
}
531
532 ///**Same as newStack(size_t) but uses stateCopy cached on stack by caller
533 //* to avoid a thread-local storage lookup.  Strictly a speed hack.*/
534 //T[] newStack(T)(size_t size, TempAlloc.State state) nothrow {
535 //    size_t bytes = size * T.sizeof;
536 //    T* ptr = cast(T*) TempAlloc.malloc(bytes, state);
537 //    return ptr[0..size];
538 //}
539
/**Concatenate any number of arrays of the same type, placing results on
 * the TempAlloc stack.*/
T[0] stackCat(T...)(T data) {
    // All arguments must be arrays of the same type.
    foreach(array; data) {
        static assert(is(typeof(array) == typeof(data[0])));
    }

    // One pass to size the result, one pass to copy.
    size_t totalLen = 0;
    foreach(array; data) {
        totalLen += array.length;
    }
    auto ret = newStack!(Unqual!(typeof(T[0][0])))(totalLen);

    size_t offset = 0;
    foreach(array; data) {
        ret[offset..offset + array.length] = array[0..$];
        offset += array.length;
    }
    return cast(T[0]) ret;
}
560
/**Copies the contents of from into to, element by element.  Uses slice
 * assignment when the types support it, otherwise indexed copying.
 * to must already be large enough to hold from's contents.
 *
 * Fixed:  previously, if neither branch matched, the template silently
 * compiled to a no-op.  That is now a compile-time error.*/
void rangeCopy(T, U)(T to, U from) {
    static if(is(typeof(to[] = from[]))) {
        to[] = from[];
    } else static if(isRandomAccessRange!(T)) {
        size_t i = 0;
        foreach(elem; from) {
            to[i++] = elem;
        }
    } else {
        static assert(0, "rangeCopy:  cannot copy " ~ U.stringof ~
            " into " ~ T.stringof);
    }
}
571
/**Creates a duplicate of a range for temporary use within a function in the
 * best way that can be done safely.  If ElementType!(T) is a value type
 * or T is an array, the results can safely be placed in TempAlloc because
 * either it doesn't need to be scanned by the GC or there's guaranteed to be
 * another reference to the contents somewhere. Otherwise, the results
 * are placed on the GC heap.
 *
 * This function is much faster if T has a length, but works even if it doesn't.
 */
Unqual!(ElementType!(T))[] tempdup(T)(T data)
if(isInputRange!(T) && (isArray!(T) || !isReferenceType!(ElementType!(T)))) {
    alias ElementType!(T) E;
    alias Unqual!(E) U;
    static if(dstats.base.hasLength!(T)) {
        // Fast path:  size known up front, so allocate once and copy.
        U[] ret = newStack!(U)(data.length);
        rangeCopy(ret, data);
        return ret;
    } else {
        // Slow path:  copy element by element into the current TempAlloc
        // block, growing when the block runs out.
        auto state = TempAlloc.getState;
        auto startPtr = TempAlloc(0, state);
        size_t bytesCopied = 0;

        while(!data.empty) {  // Make sure range interface is being used.
            auto elem = data.front;
            if(state.used + U.sizeof <= TempAlloc.blockSize) {
                data.popFront;
                *(cast(U*) (startPtr + bytesCopied)) = elem;
                bytesCopied += U.sizeof;
                state.used += U.sizeof;
            } else {
                if(bytesCopied + U.sizeof >= TempAlloc.blockSize / 2) {
                    // Then just heap-allocate.
                    U[] result = (cast(U*) alignedMalloc(bytesCopied * 2))
                        [0..bytesCopied / U.sizeof * 2];

                    immutable elemsCopied = bytesCopied / U.sizeof;
                    result[0..elemsCopied] = (cast(U*) startPtr)[0..elemsCopied];
                    finishCopy(result, data, elemsCopied);
                    // Release the partial TempAlloc copy and record the
                    // heap pointer so TempAlloc.free() frees it later.
                    TempAlloc.free;
                    state.putLast(result.ptr);
                    return result;
                } else {
                    // Still small:  move what we have into a fresh,
                    // larger TempAlloc allocation and keep going.
                    U[] oldData = (cast(U*) startPtr)[0..bytesCopied / U.sizeof];
                    state.used -= bytesCopied;
                    state.totalAllocs--;
                    U[] newArray = newStack!(U)(bytesCopied / U.sizeof + 1, state);
                    newArray[0..oldData.length] = oldData[];
                    startPtr = state.space;
                    newArray[$ - 1] = elem;
                    bytesCopied += U.sizeof;
                    data.popFront;
                }
            }
        }
        // Pad `used` up to alignment so later allocations stay aligned.
        auto rem = bytesCopied % TempAlloc.alignBytes;
        if(rem != 0) {
            auto toAdd = 16 - rem;
            if(state.used + toAdd < TempAlloc.blockSize) {
                state.used += toAdd;
            } else {
                state.used = TempAlloc.blockSize;
            }
        }
        return (cast(U*) startPtr)[0..bytesCopied / U.sizeof];
    }
}
638
// Overload for ranges of reference-containing elements:  these must live on
// GC-visible memory, so copy into an alignedMalloc'd block registered with
// GC.addRange (shouldAddRange = true).
Unqual!(ElementType!(T))[] tempdup(T)(T data)
if(isInputRange!(T) && !(isArray!(T) || !isReferenceType!(ElementType!(T)))) {
    // Initial guess of how much space to allocate.  It's relatively large b/c
    // the object will be short lived, so speed is more important than space
    // efficiency.
    enum initialGuess = 128;

    alias Unqual!(ElementType!T) E;
    auto arr = (cast(E*) alignedMalloc(E.sizeof * initialGuess, true))
        [0..initialGuess];

    finishCopy(arr, data, 0);
    // Record the pointer so TempAlloc.free() can release it in LIFO order.
    TempAlloc.getState.putLast(arr.ptr);
    return arr;
}
654
// Finishes copying a range to a C heap allocated array.  Assumes the first
// half of the input array is stuff already copied and the second half is
// free space.  On return, result is trimmed to the number of elements
// actually copied.
private void finishCopy(T, U)(ref T[] result, U range, size_t alreadyCopied) {
    // Doubles the backing allocation, preserving contents and the
    // addRange flag (handled inside alignedRealloc).
    void doRealloc() {
        auto newPtr = cast(T*) alignedRealloc(
            result.ptr, result.length * T.sizeof * 2, result.length * T.sizeof
        );
        result = newPtr[0..result.length * 2];
    }

    auto index = alreadyCopied;
    foreach(elem; range) {
        if(index == result.length) doRealloc();
        result[index++] = elem;
    }

    result = result[0..index];
}
674
// True iff R has a length member or method convertible to ulong.
// See Bugzilla 2873.  This can be removed once that's fixed.
template hasLength(R) {
    enum bool hasLength = is(typeof(R.init.length) : ulong) ||
                      is(typeof(R.init.length()) : ulong);
}
680
/**Converts any iterable into a GC-heap array.
 *
 * Now that Phobos does this well, this just forwards to std.array.array.
 * (The old hand-rolled implementation, kept here commented-out for years,
 * has been removed — it was fully superseded by the Phobos forward.)*/
Unqual!(IterType!(T))[] toArray(T)(T range) if(isIterable!(T)) {
    return std.array.array(range);
}
709
// Exercises tempdup's lengthless-range path, including its growth/fallback
// handling once the current TempAlloc block runs out of room.
unittest {
    // Create quick and dirty finite but lengthless range.
    static struct Count {
        uint num;
        uint upTo;
        @property size_t front() {
            return num;
        }
        void popFront() {
            num++;
        }
        @property bool empty() {
            return num >= upTo;
        }
    }

    // Pre-fill most of the current block so tempdup has to grow.
    TempAlloc(1024 * 1024 * 3);
    Count count;
    count.upTo = 1024 * 1025;
    auto asArray = tempdup(count);
    foreach(i, elem; asArray) {
        assert(i == elem, to!(string)(i) ~ "\t" ~ to!(string)(elem));
    }
    assert(asArray.length == 1024 * 1025);
    TempAlloc.free;
    TempAlloc.free;
    // Return any recycled blocks to the C heap so later tests start clean.
    while(TempAlloc.getState.freelist.index > 0) {
        alignedFree(TempAlloc.getState.freelist.pop);
    }
}
740
/**A string to mixin at the beginning of a scope, purely for
 * convenience.  Initializes a TempAlloc frame using frameInit(),
 * and inserts a scope statement to delete this frame at the end
 * of the current scope.
 *
 * Slower than calling free() manually when only a few pieces
 * of memory will be allocated in the current scope, due to the
 * extra bookkeeping involved.  Can be faster, however, when
 * large amounts of allocations, such as arrays of arrays,
 * are allocated, due to caching of data stored in thread-local
 * storage.*/
immutable char[] newFrame =
    "TempAlloc.frameInit; scope(exit) TempAlloc.frameFree;";
754
unittest {
    /* Not a particularly good unittest in that it depends on knowing the
     * internals of TempAlloc, but it's the best I could come up w/.  This
     * is really more of a stress test/sanity check than a normal unittest.*/

    // Make sure state is completely reset.
    if(TempAlloc.state) TempAlloc.state.destroy();
    TempAlloc.state = null;

     // First test to make sure a large number of allocations does what it's
     // supposed to in terms of reallocing lastAlloc[], etc.
     enum nIter =  TempAlloc.blockSize * 5 / TempAlloc.alignBytes;
     foreach(i; 0..nIter) {
         TempAlloc(TempAlloc.alignBytes);
     }
     assert(TempAlloc.getState.nblocks == 5, to!string(TempAlloc.getState.nblocks));
     assert(TempAlloc.getState.nfree == 0);
     foreach(i; 0..nIter) {
        TempAlloc.free;
    }
    assert(TempAlloc.getState.nblocks == 1);
    assert(TempAlloc.getState.nfree == 2);

    // Make sure logic for freeing excess blocks works.  If it doesn't this
    // test will run out of memory.
    enum allocSize = TempAlloc.blockSize / 2;
    foreach(i; 0..50) {
        foreach(j; 0..50) {
            TempAlloc(allocSize);
        }
        foreach(j; 0..50) {
            TempAlloc.free;
        }
    }

    // Make sure data is stored properly.
    foreach(i; 0..10) {
        TempAlloc(allocSize);
    }
    foreach(i; 0..5) {
        TempAlloc.free;
    }
    void* space = TempAlloc.state.space;
    size_t used = TempAlloc.state.used;

    TempAlloc.frameInit;
    // This array of arrays should not be scanned by the GC because otherwise
    // bugs caused by not having the GC scan certain internal things in
    // TempAlloc that it should would not be exposed.
    uint[][] arrays = (cast(uint[]*) GC.malloc((uint[]).sizeof * 10,
                       GC.BlkAttr.NO_SCAN))[0..10];
    foreach(i; 0..10) {
        uint[] data = newStack!(uint)(250_000);
        foreach(j, ref e; data) {
            e = cast(uint) (j * (i + 1));  // Arbitrary values that can be read back later.
        }
        arrays[i] = data;
    }

    // Make stuff get overwritten if blocks are getting GC'd when they're not
    // supposed to.
    GC.minimize;  // Free up all excess pools.
    uint[][] foo;
    foreach(i; 0..40) {
        foo ~= new uint[1_048_576];
    }
    foo = null;

    // Read the data back; it must be intact after the GC churn above.
    for(size_t i = 9; i != size_t.max; i--) {
        foreach(j, e; arrays[i]) {
            assert(e == j * (i + 1));
        }
    }
    TempAlloc.frameFree;
    assert(space == TempAlloc.state.space);
    assert(used == TempAlloc.state.used);
    while(TempAlloc.state.nblocks > 1 || TempAlloc.state.used > 0) {
        TempAlloc.free;
    }

    // Test that everything is really getting destroyed properly when
    // destroy() is called.  If not then this test will run out of memory.
    foreach(i; 0..1000) {
        TempAlloc.state.destroy();
        TempAlloc.state = null;

        foreach(j; 0..1_000) {
            auto ptr = TempAlloc.malloc(20_000);
            assert((cast(size_t) ptr) % TempAlloc.alignBytes == 0);
        }

        foreach(j; 0..500) {
            TempAlloc.free();
        }
    }
}
851
// Singly-linked chain node holding a key/value pair; presumably the
// collision-chain node for StackHash/StackSet (see HashRange below, which
// walks .next/.key/.val).
struct SHNode(K, V) {
    alias SHNode!(K, V) SomeType;
    SomeType* next;
    Unqual!(K) key;
    Unqual!(V) val;
}
858
859 /**Forward range struct for iterating over the keys or values of a
860  * StackHash or StackSet.  The lifetime of this object must not exceed that
861  * of the underlying StackHash or StackSet.*/
struct HashRange(K, S, bool vals = false) {
private:
    S* set;          // The StackHash/StackSet being iterated; not owned.
    size_t index;    // Current bucket index; size_t.max is the "empty" sentinel.
    S.Node* next;    // Next chain node in the current bucket, or null.
    K* frontElem;    // Pointer to the current front key (or value if vals).
    size_t _length;  // Number of elements not yet iterated.

    this(S* set) {
        this.set = set;
        // If bucket 0 is unused, advance to the first occupied bucket.
        // Note: popFront decrements _length, but _length is (re)assigned
        // below afterwards, so the transient wraparound is harmless.
        if(set.rNext[0] == set.usedSentinel) {
            this.popFront;
        } else {
            static if(vals) {
                frontElem = set.rVals.ptr;
            } else {
                frontElem = set.rKeys.ptr;
            }
            next = set.rNext[0];
        }
        this._length = set.length;
    }

public:
    ///
    void popFront()
    in {
        assert(!empty);
    } body {
        this._length--;
        if(next is null) {
            // Current bucket's chain is exhausted:  scan forward for the
            // next bucket that isn't marked with usedSentinel.
            do {
                index++;
                if(index >= set.rNext.length) {
                    index = size_t.max;  // Sentinel for empty.
                    return;
                }
                next = set.rNext[index];
            } while(set.rNext[index] == set.usedSentinel);
            static if(vals) {
                frontElem = &(set.rVals[index]);
            } else {
                frontElem = &(set.rKeys[index]);
            }
        } else {
            // Still walking the overflow chain of the current bucket.
            static if(vals) {
                frontElem = &(next.val);
            } else {
                frontElem = &(next.key);
            }
            next = next.next;
        }
    }

    ///
    static if(vals) {
        // Values are returned by reference so they can be mutated in place.
        @property ref Unqual!(K) front()
        in {
            assert(!empty);
        } body {
            return *frontElem;
        }
    } else {
       // Keys are returned by value:  mutating a key would corrupt the table.
       @property Unqual!(K) front()
       in {
            assert(!empty);
       } body {
            return *frontElem;
        }
    }

    ///
    @property bool empty() {
        return index == size_t.max;
    }

    ///
    @property size_t length() {
        return _length;
    }

    // Forward-range save():  a shallow copy suffices because iteration
    // state is just the index/next/frontElem trio.
    ///
    @property typeof(this) save() {
        return this;
    }
}
948
949 /**A hash table that allocates its memory on TempAlloc.  Good for building a
950  * temporary hash tables that will not escape the current scope.
951  *
952  * To avoid TempAlloc memory leaks, use mixin(newFrame).
953  *
954  * Examples:
955  * ---
956  * mixin(newFrame);  // To make sure all memory gets freed at end of scope.
957  * auto ss = StackHash!(uint)(5);
958  * foreach(i; 0..5) {
959  *     ss[i]++;
960  * }
961  * assert(ss[3] == 1);
962  * ---
963  *
964  * Warning:
965  * This implementation places removed nodes on an internal free list and
966  * recycles them, since there is no way to delete TempAlloc-allocated data
967  * in a non-LIFO order.  Therefore, you may not retain the address of a
968  * variable stored in a StackHash after deleting it from the StackHash.
969  * For example, DO NOT do this:
970  * ---
971  * SomeType* myPtr = &(myStackHash["foo"]);
972  * myStackHash.remove("foo");
973  * *myPtr = someValue;
974  * ---
975  */
/* Implementation notes:
 *
 * - Buckets live in three parallel arrays (rKeys, rVals, rNext) rather than
 *   an array of structs, to avoid per-bucket alignment padding.
 * - rNext[i] == usedSentinel (the address of rNext's own buffer) marks an
 *   UNUSED bucket; null marks a used bucket with no overflow chain.
 * - TempAlloc can only free in LIFO order, so nodes deleted by remove() are
 *   pushed onto *freeList and recycled by allocNode().
 */
struct StackHash(K, V) {
private:
    alias SHNode!(K, V) Node;

    // Using parallel arrays instead of structs to save on alignment overhead:
    Unqual!(K)[] rKeys;
    Unqual!(V)[] rVals;
    Unqual!(Node*)[] rNext;

    // Holds nodes that were deleted by remove().
    Node** freeList;

    TempAlloc.State TAState;
    size_t _length;

    // Tries to allocate off the free list.  Otherwise allocates off
    // TempAlloc.
    Node* allocNode() {
        if(*freeList is null) {
            return cast(Node*) TempAlloc(Node.sizeof, TAState);
        }
        auto ret = *freeList;
        *freeList = (*freeList).next;
        return ret;
    }

    // Add a removed node to the free list.
    void pushFreeList(Node* node) {
        if(*freeList is null) {
            node.next = null;  // Sentinel
            *freeList = node;
        } else {
            node.next = *freeList;
            *freeList = node;
        }
    }

    // rNext.ptr is stored in elements of rNext as a sentinel to indicate
    // that the corresponding slot is unused.
    Node* usedSentinel() @property {
        return cast(Node*) rNext.ptr;
    }

    // Allocate (or recycle) a chain node holding key with a default value.
    Node* newNode(K key) {
        Node* ret = allocNode();
        ret.key =  key;
        ret.val =  V.init;
        ret.next = null;
        return ret;
    }

    // Allocate (or recycle) a chain node holding key/val.
    Node* newNode(K key, V val) {
        Node* ret = allocNode();
        ret.key =  key;
        ret.val = val;
        ret.next = null;
        return ret;
    }

    // Hash key and reduce modulo the table size.  Integral keys that fit in
    // hash_t are used directly; otherwise prefer a member toHash(), falling
    // back to the TypeInfo hash.
    hash_t getHash(K key) {
        static if(is(K : long) && K.sizeof <= hash_t.sizeof) {
            hash_t hash = cast(hash_t) key;
        } else static if(__traits(compiles, key.toHash())) {
            hash_t hash = key.toHash();
        } else {
            hash_t hash = typeid(K).getHash(&key);
        }
        hash %= rNext.length;
        return hash;
    }


public:
    /**Due to the nature of TempAlloc, you must specify on object creation
     * the approximate number of elements your table will have.  Too large a
     * number will waste space and incur poor cache performance.  Too low a
     * number will make this struct perform like a linked list.  Generally,
     * if you're building a table from some other range, some fraction of the
     * size of that range is a good guess.*/
    this(size_t nElem) {
        // Obviously, the caller can never mean zero, because this struct
        // can't work at all with nElem == 0, so assume it's a mistake and fix
        // it here.
        if(nElem == 0)
            nElem++;
        TAState = TempAlloc.getState;
        rKeys = newStack!(K)(nElem, TAState);
        rVals = newStack!(V)(nElem, TAState);

        // Allocate free list in same block with Node ptrs.  That's what the
        // + 1 is for.
        rNext = newStack!(Node*)(nElem + 1, TAState);
        freeList = &(rNext[$ - 1]);
        *freeList = null;
        rNext = rNext[0..$ - 1];

        foreach(ref rKey; rKeys) {
            rKey =  K.init;
        }
        foreach(ref rVal; rVals) {
            rVal = V.init;
        }
        // Mark every bucket unused.
        foreach(ref r; rNext) {
            r = usedSentinel;
        }


    }

    /**Index an element of the range.  If it does not exist, it will be created
     * and initialized to V.init.*/
    ref V opIndex(K key) {
        hash_t hash = getHash(key);

        if(rNext[hash] == usedSentinel) {
            // Bucket unused:  claim it in place, no node allocation needed.
            rKeys[hash] =  key;
            rNext[hash] = null;
            _length++;
            return rVals[hash];
        } else if(rKeys[hash] == key) {
            return rVals[hash];
        } else {  // Collision.  Start chaining.
            Node** next = &(rNext[hash]);
            while(*next !is null) {
                if((**next).key ==  key) {
                    return (**next).val;
                }
                next = &((**next).next);
            }
            *next = newNode(key);
            _length++;
            return (**next).val;
        }
    }

    ///
    V opIndexAssign(V val, K key) {
        hash_t hash = getHash(key);

        if(rNext[hash] == usedSentinel) {
            rKeys[hash] =  key;
            rVals[hash] = val;
            rNext[hash] = null;
            _length++;
            return val;
        } else if(rKeys[hash] ==  key) {
            rVals[hash] = val;
            return val;
        } else {  // Collision.  Start chaining.
            Node** next = &(rNext[hash]);
            while(*next !is null) {
                if((**next).key == key) {
                    (**next).val = val;
                    return val;
                }
                next = &((**next).next);
            }
            _length++;
            *next = newNode(key, val);
            return val;
        }
    }

    /// 'in' operator:  returns a pointer to the value, or null if absent.
    V* opIn_r(K key) {
        hash_t hash = getHash(key);

        if(rNext[hash] == usedSentinel) {
            return null;
        } else if(rKeys[hash] == key) {
            return &(rVals[hash]);
        } else {  // Collision.  Start chaining.
            Node* next = rNext[hash];
            while(next !is null) {
                if(next.key == key) {
                    return &(next.val);
                }
                next = next.next;
            }
            return null;
        }
   }

    /// Remove key if present; does nothing if absent.
    void remove(K key) {
        hash_t hash = getHash(key);

        Node** next = &(rNext[hash]);
        if(rNext[hash] == usedSentinel) {
            return;
        } else if(rKeys[hash] == key) {
            _length--;
            if(rNext[hash] is null) {
                // No overflow chain:  just mark the bucket unused again.
                rKeys[hash] = K.init;
                rVals[hash] = V.init;
                rNext[hash] = usedSentinel;
                return;
            } else {
                // Promote the first chain node into the bucket slot and
                // recycle that node via the free list.
                Node* toPush = *next;

                rKeys[hash] = (**next).key;
                rVals[hash] = (**next).val;
                rNext[hash] = (**next).next;

                pushFreeList(toPush);
                return;
            }
        } else {  // Collision.  Start chaining.
            while(*next !is null) {
                if((**next).key == key) {
                    _length--;

                    // Unlink the node from the chain and recycle it.
                    Node* toPush = *next;
                    *next = (**next).next;

                    pushFreeList(toPush);
                    break;
                }
                next = &((**next).next);
            }
            return;
        }
   }

    /**Returns a forward range to iterate over the keys of this table.
     * The lifetime of the HashRange must not exceed the lifetime of this
     * StackHash.*/
    HashRange!(K, StackHash!(K, V)) keys() {
        return typeof(return)(&this);
    }

    /**Returns a forward range to iterate over the values of this table.
     * The lifetime of the HashRange must not exceed the lifetime of this
     * StackHash.*/
    HashRange!(V, StackHash!(K, V), true) values() {
       return typeof(return)(&this);
    }

    ///
    @property size_t length() const {
        return _length;
    }

    /**
    Attempt to look up a key and return a default value if the key is not
    present.
    */
    V get(K key, lazy V defaultValue) {
        auto ptr = key in this;
        if(ptr) return *ptr;
        return defaultValue;
    }

    // foreach support:  walks the keys and values ranges in lockstep, which
    // is valid because both ranges visit buckets/chains in the same order.
    int opApply(int delegate(ref K, ref V) dg) {
        auto k = this.keys;
        auto v = this.values;
        int res;

        while(!k.empty) {
            auto kFront = k.front;
            res = dg(kFront, v.front);
            k.popFront;
            v.popFront;
            if(res) {
                break;
            }
        }

        return res;
    }

    // Diagnostic:  fraction of buckets in use (1.0 == no chaining needed).
    real efficiency() {
       uint used = 0;
       foreach(root; rNext) {
           if(root != usedSentinel) {
               used++;
           }
       }
       return cast(real) used / rNext.length;
    }
}
1257
// Tests for StackHash:  basic CRUD + iteration, removal with chain
// promotion, and a Monte Carlo cross-check against the built-in AA.
unittest {
    alias StackHash!(string, uint) mySh;

    {  // Basic sanity checks.
        mixin(newFrame);
        auto data = mySh(2);  // Make sure we get some collisions.
        data["foo"] = 1;
        data["bar"] = 2;
        data["baz"] = 3;
        data["waldo"] = 4;
        assert(!("foobar" in data));
        assert(*("foo" in data) == 1);
        assert(*("bar" in data) == 2);
        assert(*("baz" in data) == 3);
        assert(*("waldo" in data) == 4);
        assert(data["foo"] == 1);
        assert(data["bar"] == 2);
        assert(data["baz"] == 3);
        assert(data["waldo"] == 4);
        auto myKeys = toArray(data.keys);
        qsort(myKeys);
        assert(myKeys == cast(string[]) ["bar", "baz", "foo", "waldo"]);
        auto myValues = toArray(data.values);
        qsort(myValues);
        assert(myValues == [1U, 2, 3, 4]);
        {
            // keys and values ranges must stay in lockstep.
            auto k = data.keys;
            auto v = data.values;
            while(!k.empty) {
                assert(data[k.front] == v.front);
                k.popFront;
                v.popFront;
            }
        }
        foreach(v; data.values) {
            assert(v > 0 && v < 5);
        }
    }

    alias StackHash!(uint, uint) mySh2;
    {   // Test remove.
        mixin(newFrame);

        auto foo = mySh2(7);
        for(uint i = 0; i < 200; i++) {
            foo[i] = i;
        }
        assert(foo.length == 200);
        for(uint i = 0; i < 200; i += 2) {
            foo.remove(i);
        }
        foreach(i; 20..200) {
            foo.remove(i);
        }
        // Only odd keys below 20 should survive.
        for(uint i = 0; i < 20; i++) {
            if(i & 1) {
                assert(i in foo);
                assert(*(i in foo) == i);
            } else {
                assert(!(i in foo));
            }
        }
        auto vals = toArray(foo.values);
        assert(foo.length == 10);
        assert(vals.qsort == [1U, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
    }

    { // Monte carlo unittesting against builtin hash table.
        mixin(newFrame);
        uint[uint] builtin;
        auto monteSh = mySh2(20_000);
        uint[] nums = newStack!uint(100_000);
        foreach(ref num; nums) {
            num = uniform(0U, uint.max);
        }

        // Random insert/remove churn:  both tables must always agree.
        foreach(i; 0..1_000_000) {
            auto index = uniform(0, cast(uint) nums.length);
            if(index in builtin) {
                assert(index in monteSh);
                assert(builtin[index] == nums[index]);
                assert(monteSh[index] == nums[index]);
                builtin.remove(index);
                monteSh.remove(index);
            } else {
                assert(!(index in monteSh));
                builtin[index] = nums[index];
                monteSh[index] = nums[index];
            }
        }

        assert(builtin.length == monteSh.length);
        foreach(k, v; builtin) {
            assert(k in monteSh);
            assert(*(k in builtin) == *(k in monteSh));
            assert(monteSh[k] == v);
        }

        // Make sure nothing is missed in iteration.  Since both keys and
        // values use the same struct, just with a few static if statements,
        // if it works for keys and simple tests work for values, it works.
        foreach(k; monteSh.keys) {
            builtin.remove(k);
        }
        assert(builtin.length == 0);

    }
}
1366
1367 /**A hash set that allocates its memory on TempAlloc.  Good for building a
1368  * temporary set that will not escape the current scope.
1369  *
1370  * To avoid TempAlloc memory leaks, use mixin(newFrame).
1371  *
1372  * Examples:
1373  * ---
1374  * mixin(newFrame);  // To make sure all memory gets freed at end of scope.
1375  * auto ss = StackSet!(uint)(5);
1376  * foreach(i; 0..5) {
1377  *     ss.insert(i);
1378  * }
1379  * assert(3 in ss);
1380  * ---
1381  */
/* Implementation mirrors StackHash minus the values array:  buckets in
 * rKeys/rNext, usedSentinel (== rNext.ptr) marks unused buckets, removed
 * chain nodes are recycled through *freeList because TempAlloc can only
 * free in LIFO order.
 */
struct StackSet(K) {
private:
    // Choose smallest representation of the data.
    struct Node1 {
        Node1* next;
        K key;
    }

    struct Node2 {
        K key;
        Node2* next;
    }

    // Field order affects padding, so pick whichever layout is smaller.
    static if(Node1.sizeof < Node2.sizeof) {
        alias Node1 Node;
    } else {
        alias Node2 Node;
    }

    Unqual!(K)[] rKeys;
    Node*[] rNext;

    // Holds nodes that were deleted by remove(), for recycling.
    Node** freeList;

    TempAlloc.State TAState;
    size_t _length;

    // rNext.ptr marks a bucket as unused (see StackHash).
    Node* usedSentinel() {
        return cast(Node*) rNext.ptr;
    }

    // Tries to allocate off the free list.  Otherwise allocates off
    // TempAlloc.
    Node* allocNode() {
        if(*freeList is null) {
            return cast(Node*) TempAlloc(Node.sizeof, TAState);
        }
        auto ret = *freeList;
        *freeList = (*freeList).next;
        return ret;
    }

    // Add a removed node to the free list.
    void pushFreeList(Node* node) {
        if(*freeList is null) {
            node.next = null;  // Sentinel
            *freeList = node;
        } else {
            node.next = *freeList;
            *freeList = node;
        }
    }

    // Allocate (or recycle) a chain node holding key.
    Node* newNode(K key) {
        Node* ret = allocNode();
        ret.key = key;
        ret.next = null;
        return ret;
    }

    // Hash key and reduce modulo the table size (same scheme as StackHash).
    hash_t getHash(K key) {
        static if(is(K : long) && K.sizeof <= hash_t.sizeof) {
            hash_t hash = cast(hash_t) key;
        } else static if(__traits(compiles, key.toHash())) {
            hash_t hash = key.toHash();
        } else {
            hash_t hash = typeid(K).getHash(&key);
        }
        hash %= rNext.length;
        return hash;
    }

public:
    /**Due to the nature of TempAlloc, you must specify on object creation
     * the approximate number of elements your set will have.  Too large a
     * number will waste space and incur poor cache performance.  Too low a
     * number will make this struct perform like a linked list.  Generally,
     * if you're building a set from some other range, some fraction of the
     * size of that range is a good guess.*/
    this(size_t nElem) {
        // Obviously, the caller can never mean zero, because this struct
        // can't work at all with nElem == 0, so assume it's a mistake and fix
        // it here.
        if(nElem == 0)
            nElem++;
        TAState = TempAlloc.getState;

        // Allocate the free list as the last element of rNext.
        rNext = newStack!(Node*)(nElem + 1, TAState);
        freeList = &(rNext[$ - 1]);
        *freeList = null;
        rNext = rNext[0..$ - 1];

        foreach(ref root; rNext) {
            root = usedSentinel;
        }

        rKeys = newStack!(Unqual!(K))(nElem, TAState);
        foreach(ref root; rKeys) {
            root = K.init;
        }
    }

    /// Insert key; no-op if it is already present.
    void insert(K key) {
        hash_t hash = getHash(key);

        if(rNext[hash] == usedSentinel) {
            // Bucket unused:  claim it in place, no node allocation needed.
            rKeys[hash] = key;
            rNext[hash] = null;
            _length++;
            return;
        } else if(rKeys[hash] == key) {
            return;
        } else {  // Collision.  Start chaining.
            Node** next = &(rNext[hash]);
            while(*next !is null) {
                if((**next).key == key) {
                    return;
                }
                next = &((**next).next);
            }
            *next = newNode(key);
            _length++;
            return;
        }
    }

    /**Returns a forward range of the elements of this struct.  The range's
     * lifetime must not exceed the lifetime of this object.*/
    HashRange!(K, typeof(this)) elems() {
        auto ret = typeof(return)(&this);
        return ret;
    }

    /// 'in' operator:  membership test.
    bool opIn_r(K key) {
        hash_t hash = getHash(key);

        if(rNext[hash] == usedSentinel) {
            return false;
        } else if(rKeys[hash] == key) {
            return true;
        } else {  // Collision.  Start chaining.
            Node* next = rNext[hash];
            while(next !is null) {
                if(next.key == key) {
                    return true;
                }
                next = next.next;
            }
            return false;
        }
   }

    /// Remove key if present; does nothing if absent.
    void remove(K key) {
        hash_t hash = getHash(key);

        Node** next = &(rNext[hash]);
        if(rNext[hash] == usedSentinel) {
            return;
        } else if(rKeys[hash] == key) {
            _length--;
            if(rNext[hash] is null) {
                // No overflow chain:  just mark the bucket unused again.
                rKeys[hash] = K.init;
                rNext[hash] = usedSentinel;
                return;
            } else {
                // Promote the first chain node into the bucket slot and
                // recycle that node via the free list.
                Node* toPush = *next;

                rKeys[hash] = (**next).key;
                rNext[hash] = (**next).next;

                pushFreeList(toPush);
                return;
            }
        } else {  // Collision.  Start chaining.
            while(*next !is null) {
                if((**next).key == key) {
                    _length--;
                    Node* toPush = *next;

                    *next = (**next).next;
                    pushFreeList(toPush);
                    break;
                }
                next = &((**next).next);
            }
            return;
        }
   }

    ///
    @property size_t length() {
       return _length;
    }
}
1580
// Tests for StackSet:  insert/remove/membership plus a Monte Carlo
// cross-check against a built-in AA used as a set.
unittest {
    { // "Normal" unittesting.
        mixin(newFrame);
        alias StackSet!(uint) mySS;
        mySS set = mySS(12);
        foreach(i; 0..20) {
            set.insert(i);
        }
        assert(toArray(set.elems).qsort == seq(0U, 20U));

        for(uint i = 0; i < 20; i += 2) {
            set.remove(i);
        }

        // Only odd elements should survive.
        foreach(i; 0..20) {
            if(i & 1) {
                assert(i in set);
            } else {
                assert(!(i in set));
            }
        }
        uint[] contents;

        foreach(elem; set.elems) {
            contents ~= elem;
        }
        assert(contents.qsort == [1U,3,5,7,9,11,13,15,17,19]);
    }

    { // Monte carlo unittesting against builtin hash table.
        mixin(newFrame);
        bool[uint] builtin;
        auto monteSh = StackSet!uint(20_000);

        // Random insert/remove churn:  both sets must always agree.
        foreach(i; 0..1_000_000) {
            auto index = uniform(0, 100_000);
            if(index in builtin) {
                assert(index in monteSh);
                builtin.remove(index);
                monteSh.remove(index);
            } else {
                assert(!(index in monteSh));
                builtin[index] = 1;
                monteSh.insert(index);
            }
        }

        assert(builtin.length == monteSh.length);
        foreach(k, v; builtin) {
            assert(k in monteSh);
        }

        // Iteration must visit every element exactly once.
        foreach(k; monteSh.elems) {
            builtin.remove(k);
        }
        assert(builtin.length == 0);
    }
}
1639
/*Height of an AVL subtree rooted at node; a null pointer is an empty
 * subtree and has height 0.*/
private int height(T)(const T node) nothrow {
    if(node is null) {
        return 0;
    }
    return node.height;
}
1643
/*AVL tree node that keeps its height in a plain int field.  StackTree
 * selects this layout over the bit-packed one when it is no larger after
 * alignment.*/
struct AVLNodeRealHeight(T) {
    T payload;
    typeof(this)* left;
    typeof(this)* right;
    int height;

    /*AVL balance factor:  height(left subtree) - height(right subtree).*/
    int balance() const nothrow @property {
        immutable hL = .height(left);
        immutable hR = .height(right);
        return hL - hR;
    }

    /*Recompute this node's height from its children's current heights.*/
    void fixHeight() nothrow {
        immutable hL = .height(left);
        immutable hR = .height(right);
        if(hL > hR) {
            height = hL + 1;
        } else {
            height = hR + 1;
        }
    }

    /*True iff this node has no children.*/
    bool isLeaf() nothrow @property {
        return (left is null) && (right is null);
    }
}
1665
1666 /* Store the height in the low order bits of the pointers to save space,
1667  * since TempAlloc allocates 16-byte aligned memory anyhow, but only if
1668  * this would be smaller after considering alignment.
1669  */
struct AVLNodeBitwise(T) {
    T payload;
    // Each of _left/_right packs a child pointer (high bits) with 4 bits of
    // the node height (low bits).  Valid only because the pointers are
    // 16-byte aligned (TempAlloc alignment), so their low 4 bits are zero.
    size_t _left;
    size_t _right;

    enum size_t mask = 0b1111;     // Low 4 bits:  height nibble.
    enum size_t notMask = ~mask;   // High bits:  the actual pointer.

    typeof(this)* left() nothrow @property {
        return cast(typeof(return)) (_left & notMask);
    }

    const(typeof(this))* left() const nothrow @property {
        return cast(typeof(return)) (_left & notMask);
    }

    // Replace the pointer bits while preserving the height nibble.
    void left(typeof(this)* newLeft) nothrow @property
    in {
        assert((cast(size_t) newLeft & mask) == 0);
    } body {
        _left &= mask;
        _left |= cast(size_t) newLeft;
        assert(left is newLeft);
    }

    typeof(this)* right() nothrow @property {
        return cast(typeof(return)) (_right & notMask);
    }

    const(typeof(this))* right() const nothrow @property {
        return cast(typeof(return)) (_right & notMask);
    }

    // Replace the pointer bits while preserving the height nibble.
    void right(typeof(this)* newRight) nothrow @property
    in {
        assert((cast(size_t) newRight & mask) == 0);
    } body {
        _right &= mask;
        _right |= cast(size_t) newRight;
        assert(right is newRight);
    }

    // Reassemble the 8-bit height:  high nibble from _left, low from _right.
    int height() const nothrow @property {
        return (((_left & mask) << 4) |
                    (_right & mask));
    }

    // Split the height into two nibbles stored in the pointers' low bits.
    void height(int newHeight) nothrow @property {
        _right &= notMask;
        _right |= (newHeight & mask);
        newHeight >>= 4;
        _left &= notMask;
        _left |= (newHeight & mask);
    }

    // AVL balance factor:  height(left subtree) - height(right subtree).
    int balance() const nothrow @property {
        return .height(left) - .height(right);
    }

    // Recompute this node's height from its children's current heights.
    void fixHeight() nothrow {
        auto leftHeight = .height(left);
        auto rightHeight = .height(right);

        height = ((leftHeight > rightHeight) ? leftHeight : rightHeight) + 1;
    }

    bool isLeaf() const nothrow @property {
        return left is null && right is null;
    }
}
1740
/*Rounds size up to the next multiple of TempAlloc.alignBytes, i.e. the
 * true footprint of a TempAlloc allocation of that size.  Used by StackTree
 * to compare the real allocated sizes of its two AVL node layouts.
 *
 * Bug fix:  the previous version evaluated to 0 (instead of size) when size
 * was already a multiple of TempAlloc.alignBytes, which made any
 * already-aligned node type appear to occupy no space at all and therefore
 * always win the bitSize < realHeightSize comparison in StackTree.*/
private template GetAligned(uint size) {
    static if(size % TempAlloc.alignBytes == 0) {
        // Already aligned:  the footprint is size itself, not zero.
        enum GetAligned = size;
    } else {
        enum GetAligned =
            size - size % TempAlloc.alignBytes + TempAlloc.alignBytes;
    }
}
1749
1750 /**An AVL tree implementation on top of TempAlloc.  If elements are removed,
1751  * they are stored on an internal free list and recycled when new elements
1752  * are added to the tree.
1753  *
1754  * Template parameters:
1755  *
1756  * T = The type to be stored in the tree.
1757  *
1758  * key = Function to access the key that what you're storing is to be compared
1759  *       on.
1760  *
1761  * compFun = The function for comparing keys.
1762  *
1763  * Examples:
1764  * ---
1765  * struct StringNum {
1766  *     string someString;
1767  *     uint num;
1768  * }
1769  *
1770  * // Create a StackTree of StringNums, sorted in descending order, using
1771  * // someString for comparison.
1772  * auto myTree = StackTree!(StringNum, "a.someString", "a > b")();
1773  *
1774  * // Add some elements.
1775  * myTree.insert( StringNum("foo", 1));
1776  * myTree.insert( StringNum("bar", 2));
1777  * myTree.insert( StringNum("foo", 3));
1778  *
1779  * assert(myTree.find("foo") == StringNum("foo", 3));
1780  * assert(myTree.find("bar") == StringNum("bar", 2));
1781  * ---
1782  *
1783  * Note:  This tree supports a compile-time interface similar to StackSet
1784  * and can be used as a finite set implementation.
1785  *
1786  * Warning:
1787  * This implementation places removed nodes on an internal free list and
1788  * recycles them, since there is no way to delete TempAlloc-allocated data
1789  * in a non-LIFO order.  Therefore, you may not retain the address of a
1790  * variable stored in a StackTree after deleting it from the StackTree.
1791  * For example, DO NOT do this:
1792  * ---
1793  * SomeType* myPtr = "foo" in myTree;
1794  * myTree.remove("foo");
1795  * *myPtr = someValue;
1796  * ---
1797  */
1798 struct StackTree(T, alias key = "a", alias compFun = "a < b") {
1799 private:
1800
1801     alias AVLNodeBitwise!(T) BitwiseNode;
1802     alias AVLNodeRealHeight!(T) RealNode;
1803
1804     enum size_t bitSize = GetAligned!(BitwiseNode.sizeof);
1805     enum size_t realHeightSize = GetAligned!(RealNode.sizeof);
1806
1807     static if(bitSize < realHeightSize ) {
1808         alias AVLNodeBitwise!(T) Node;
1809     } else {
1810         alias AVLNodeRealHeight!(T) Node;
1811     }
1812
1813     alias binaryFun!(compFun) comp;
1814     alias unaryFun!(key) getKey;
1815
1816     Node* head;
1817     Node** freeList;
1818     size_t _length;
1819     TempAlloc.State TAState;
1820
1821     static bool insertComp(T lhs, T rhs) {
1822         return comp( getKey(lhs), getKey(rhs));
1823     }
1824
1825     static Node* rotateRight(Node* node)
1826     in {
1827         assert(node.left !is null);
1828         assert( abs(node.balance) <= 2);
1829
1830     } body {
1831         Node* newHead = node.left;
1832         node.left = newHead.right;
1833         newHead.right = node;
1834
1835         node.fixHeight();
1836         newHead.fixHeight();
1837
1838         assert( abs(node.balance) < 2);
1839         return newHead;
1840     }
1841
1842     static Node* rotateLeft(Node* node)
1843     in {
1844         assert(node.right !is null);
1845         assert( abs(node.balance) <= 2);
1846     } body {
1847         Node* newHead = node.right;
1848         node.right = newHead.left;
1849         newHead.left = node;
1850
1851         node.fixHeight();
1852         newHead.fixHeight();
1853
1854         assert( abs(node.balance) < 2);
1855         return newHead;
1856     }
1857
1858     static Node* rebalance(Node* node)
1859     in {
1860         assert(node is null || abs(node.balance) <= 2);
1861     } out(ret) {
1862         assert( abs(ret.balance) < 2);
1863     } body {
1864         if(node is null) {
1865             return null;
1866         }
1867
1868         immutable balance = node.balance;
1869         if(abs(balance) <= 1) {
1870             return node;
1871         }
1872
1873         if(balance == -2) {
1874
1875             // It should be impossible to have a balance factor of -2 if
1876             // node.right is null.
1877             assert(node.right !is null);
1878             immutable rightBalance = node.right.balance;
1879             assert( abs(rightBalance) < 2);
1880
1881             if(rightBalance == 1) {
1882                 node.right = rotateRight(node.right);
1883                 node.fixHeight();
1884             }
1885
1886             assert(node.balance == -2);
1887             return rotateLeft(node);
1888
1889         } else if(balance == 2) {
1890             // It should be impossible to have a balance factor of 2 if
1891             // node.left is null.
1892             assert(node.left !is null);
1893             immutable leftBalance = node.left.balance;
1894             assert( abs(leftBalance) < 2);
1895
1896             if(leftBalance == -1) {
1897                 node.left = rotateLeft(node.left);
1898                 node.fixHeight();
1899             }
1900
1901             assert(node.balance == 2);
1902             return rotateRight(node);
1903         }
1904
1905         // AVL tree invariant is that abs(balance) <= 2 even during
1906         // insertion/deletion.
1907         assert(0);
1908     }
1909
1910     void pushFreeList(Node* node) {
1911         node.left = null;
1912         node.right = *freeList;
1913         *freeList = node;
1914     }
1915
1916     Node* popFreeList()
1917     in {
1918         assert(freeList);
1919         assert(*freeList);
1920     } body {
1921         auto ret = *freeList;
1922         *freeList = ret.right;
1923         return ret;
1924     }
1925
1926     Node* newNode(T payload)
1927     in {
1928         assert(freeList, "Uninitialized StackTree!(" ~ T.stringof ~ ")");
1929         assert(TAState, "Uninitialized StackTree!(" ~ T.stringof ~ ")");
1930     } body {
1931         Node* ret;
1932         if(*freeList !is null) {
1933             ret = popFreeList();
1934         } else {
1935             ret = cast(Node*) TempAlloc.malloc(Node.sizeof, TAState);
1936         }
1937
1938         ret.payload = payload;
1939         ret.left = null;
1940         ret.right = null;
1941         ret.height = 1;
1942         return ret;
1943     }
1944
public:
    /**De facto constructor.  Not using a "real" c'tor only because structs
     * don't support default c'tors yet.  This must be called before use, or
     * else you will get an access violation when you try to insert an
     * element.
     */
    static typeof(this) opCall() {
        typeof(this) instance;
        instance.TAState = TempAlloc.getState();
        // The free list pointer lives in a single TempAlloc-allocated slot
        // and starts out empty.
        instance.freeList = newStack!(Node*)(1).ptr;
        *(instance.freeList) = null;
        return instance;
    }
1957
1958     /**Insert an element.*/
1959     void insert(T toInsert) {
1960         if(head is null) {
1961             head = newNode(toInsert);
1962             _length++;
1963         } else {
1964             head = insertImpl(toInsert, head);
1965         }
1966     }
1967
    // Recursive worker for insert():  descends to the insertion point,
    // creates (or overwrites) the node, then fixes heights and rebalances on
    // the way back up.  Returns the possibly-new root of this subtree.
    Node* insertImpl(T toInsert, Node* insertInto) {
        if( insertComp(toInsert, insertInto.payload) ) {
            // New element orders before this node:  go left.
            if(insertInto.left is null) {
                insertInto.left = newNode(toInsert);
                _length++;
            } else {
                insertInto.left = insertImpl(toInsert, insertInto.left);
            }
        } else if( insertComp(insertInto.payload, toInsert) ) {
            // New element orders after this node:  go right.
            if(insertInto.right is null) {
                insertInto.right = newNode(toInsert);
                _length++;
            } else {
                insertInto.right = insertImpl(toInsert, insertInto.right);
            }
        } else {
            // This is correct:  If the comparison key is only part of the
            // payload, the old payload may not be equal to the new payload,
            // even if the comparison keys are equal.
            insertInto.payload = toInsert;
            return insertInto;
        }

        // A child subtree changed height; restore the AVL invariant here.
        insertInto.fixHeight();
        return rebalance(insertInto);
    }
1994
    /**Remove an element from this tree.  The type of U is expected to be the
     * type of the key that this tree is sorted on.
     */
    void remove(U)(U whatToRemove) {
        Node* removedNode;  // The node that was unlinked, recycled at the end.
        Node* leftMost;     // The node chosen to replace the removed one.

        // Detaches the leftmost node of this subtree (recording it in
        // leftMost) and returns the rebalanced remainder of the subtree.
        Node* removeLeftMost(Node* node) {
            if(node.left is null) {
                auto ret = node.right;
                node.right = null;
                leftMost = node;
                return ret;
            }

            node.left = removeLeftMost(node.left);
            node.fixHeight();
            return rebalance(node);
        }

        // Detaches the node that will replace `node` (its in-order successor,
        // or its left child when there is no right subtree), storing it in
        // leftMost.  Returns `node` with the replacement unlinked.
        Node* removeSuccessor(Node* node) {
            if(node.right is null) {
                // No right subtree:  the left child stands in.  With the AVL
                // invariant it must be a leaf here.
                assert(node.left.isLeaf);
                leftMost = node.left;

                node.left = null;
                return node;
            }

            node.right = removeLeftMost(node.right);
            node.fixHeight();
            return node;
        }

        Node* removeImpl(U whatToRemove, Node* whereToRemove) {
            // Heterogeneous comparison:  one side is a payload (T), the
            // other a bare key (U); extract the payload's key first.
            static bool findComp(V, W)(V lhs, W rhs) {
                static if(is(V == T)) {
                    static assert(is(W == U));
                    return comp( getKey(lhs), rhs);
                } else {
                    static assert(is(V == U));
                    static assert(is(W == T));
                    return comp(lhs, getKey(rhs) );
                }
            }

            if(whereToRemove is null) {
                return null;
            }

            if( findComp(whatToRemove, whereToRemove.payload) ){
                whereToRemove.left = removeImpl(whatToRemove, whereToRemove.left);
                whereToRemove.fixHeight();
                return rebalance(whereToRemove);
            } else if( findComp(whereToRemove.payload, whatToRemove) ) {
                whereToRemove.right = removeImpl(whatToRemove, whereToRemove.right);
                whereToRemove.fixHeight();
                return rebalance(whereToRemove);
            } else {
                // We've found it.
                _length--;
                removedNode = whereToRemove;
                if(whereToRemove.isLeaf) {
                    return null;
                }

                // Splice the replacement node into the removed node's place,
                // grafting the removed node's remaining children onto it.
                whereToRemove = removeSuccessor(whereToRemove);
                if(leftMost is null) {
                    // NOTE(review):  looks defensive -- removeSuccessor
                    // appears to always set leftMost on these paths.
                    return null;
                }

                leftMost.left = whereToRemove.left;
                leftMost.right = whereToRemove.right;
                leftMost.fixHeight();
                return rebalance(leftMost);
            }
        }

        head = removeImpl(whatToRemove, head);

        // Full-tree invariant check, only in debug=EXPENSIVE builds.
        debug(EXPENSIVE) assertAvl(head);

        // Recycle the unlinked node so a later insert can reuse its storage.
        if(removedNode !is null) {
            pushFreeList(removedNode);
        }
    }
2081
2082     /**Find an element and return it.  Throw an exception if it is not
2083      * present.  U is expected to be the type of the key that this tree is
2084      * sorted on.*/
2085     T find(U)(U whatToFind) {
2086         T* ptr = dstatsEnforce( opIn_r!(U)(whatToFind),
2087             "Item not found:  " ~ to!string(whatToFind));
2088         return *ptr;
2089     }
2090
2091     /**Find an element and return a pointer to it, or null if not present.*/
2092     T* opIn_r(U)(U whatToFind) {
2093         auto ret = findImpl!(U)(whatToFind, head);
2094         if(ret is null) {
2095             return null;
2096         }
2097         return &(ret.payload);
2098     }
2099
2100     Node* findImpl(U)(U whatToFind, Node* whereToFind) {
2101         static bool findComp(V, W)(V lhs, W rhs) {
2102             static if(is(V == T)) {
2103                 static assert(is(W == U));
2104                 return comp( getKey(lhs), rhs );
2105             } else {
2106                 static assert(is(V == U));
2107                 static assert(is(W == T));
2108                 return comp( lhs, getKey(rhs) );
2109             }
2110         }
2111
2112         if(whereToFind is null) {
2113             return null;
2114         }
2115
2116         if( findComp(whatToFind, whereToFind.payload) ){
2117             return findImpl!(U)(whatToFind, whereToFind.left);
2118         } else if( findComp(whereToFind.payload, whatToFind) ) {
2119             return findImpl!(U)(whatToFind, whereToFind.right);
2120         } else {
2121             // We've found it.
2122             return whereToFind;
2123         }
2124
2125         assert(0);
2126     }
2127
2128     /**Iterate over the elements of this tree in sorted order.*/
2129     int opApply( int delegate(ref T) dg) {
2130         int res;
2131         int opApplyImpl(Node* node) {
2132             if(node is null) {
2133                 return 0;
2134             }
2135             res = opApplyImpl(node.left);
2136             if(res) {
2137                 return res;
2138             }
2139             res = dg(node.payload);
2140             if(res) {
2141                 return res;
2142             }
2143             res = opApplyImpl(node.right);
2144             return res;
2145         }
2146
2147         return opApplyImpl(head);
2148     }
2149
2150     /**Number of elements in the tree.*/
2151     @property size_t length() const pure nothrow @property {
2152         return _length;
2153     }
2154 }
2155
/**Debugging helper:  recursively verifies the AVL invariants for the subtree
 * rooted at node -- stored heights are consistent, every balance factor is
 * in {-1, 0, 1}, and payloads are in strict BST order.  Returns the height
 * of the subtree (0 for an empty one).*/
private int assertAvl(T)(T node) {
    if(node is null) {
        return 0;
    }

    int leftHeight = assertAvl(node.left);
    int rightHeight = assertAvl(node.right);
    // Cached height must match the recomputed one.
    assert(node.height == max(leftHeight, rightHeight) + 1);
    assert( abs(node.balance) < 2,
        text( height(node.left), '\t', height(node.right)));

    // Strict ordering:  left child < node < right child.
    if(node.left) {
        assert(node.left.payload < node.payload);
    }

    if(node.right) {
        assert(node.right.payload > node.payload,
            text(node.payload, ' ', node.right.payload));
    }

    return node.height;
}
2178
2179
unittest {
    // Test against StackSet on random data.
    mixin(newFrame);
    StackTree!(uint) myTree = StackTree!(uint)();
    StackSet!(uint) ss = StackSet!(uint)(500);
    foreach(i; 0..1_000_000) {
        uint num = uniform(0, 1_000);
        // Toggle membership of num in both containers, checking that the
        // tree always agrees with the reference set.
        if(num in ss) {
            assert(num in myTree);
            assert(*(num in myTree) == num);
            ss.remove(num);
            myTree.remove(num);
        } else {
            assert(!(num in myTree));
            ss.insert(num);
            myTree.insert(num);
        }
    }
    // Verify the AVL invariants still hold after the random workload.
    assertAvl(myTree.head);
}
2200
2201 /**Struct that iterates over keys or values of a StackTreeAA.
2202  *
2203  * Bugs:  Uses opApply instead of the more flexible ranges, because I
2204  * haven't figured out how to iterate efficiently and in sorted order over a
2205  * tree without control of the stack.
2206  */
struct TreeAaIter(T, alias mapFun) {
    alias unaryFun!(mapFun) mFun;
    // The tree being iterated.  T is expected to expose .head (root node
    // pointer) and .length, as StackTree does.
    T tree;
    alias typeof(*(tree.head)) Node;

//    TreeRange!(T, mapFun) asRange() {
//        dstatsEnforce(0, "Not implemented yet.");
//    }

    // Element type yielded by iteration:  the result of applying mapFun to
    // a node payload.
    alias typeof( mFun( typeof(tree.head.payload).init) ) IterType;

    /**In-order iteration over mapFun(payload) for every node.  A non-zero
     * return from the delegate stops the traversal.*/
    int opApply( int delegate(ref IterType) dg) {
        int res;
        int opApplyImpl(Node* node) {
            if(node is null) {
                return 0;
            }
            res = opApplyImpl(node.left);
            if(res) {
                return res;
            }

            // dg takes its argument by ref:  pass mFun's result directly
            // when that compiles (lvalue result); otherwise copy it into a
            // local so a reference can be handed to the delegate.
            static if(__traits(compiles, dg(mFun(node.payload)))) {
                res = dg(mFun(node.payload));
            } else {
                auto toDg = mFun(node.payload);
                res = dg(toDg);
            }

            if(res) {
                return res;
            }
            res = opApplyImpl(node.right);
            return res;
        }

        return opApplyImpl(tree.head);
    }

    /**Number of elements the iteration will visit.*/
    @property size_t length() const pure nothrow {
        return tree.length;
    }
}
2252
/**Key/value pair stored as the payload of each StackTreeAA tree node.
 * Unqual strips const/immutable so the slots remain assignable when nodes
 * are overwritten or recycled.*/
private struct StackTreeAANode(K, V) {
    Unqual!(K) key;
    Unqual!(V) value;
}
2257
2258 /**An associative array implementation based on StackTree.  Lookups and
2259  * insertions are O(log N).  This is significantly slower in both theory and
2260  * practice than StackHash, but you may want to use it if:
2261  *
2262  * 1.  You don't know the approximate size of the table you will be creating
2263  *     in advance.  Unlike StackHash, this AA implementation does not need
2264  *     to pre-allocate anything.
2265  *
2266  * 2.  You care more about worst-case performance than average-case
2267  *     performance.
2268  *
2269  * 3.  You have a good comparison function for your type, but not a good hash
2270  *     function.
2271  *
2272  */
struct StackTreeAA(K, V) {
    alias StackTreeAANode!(K, V) Node;
    // Underlying AVL tree of key/value nodes, ordered by key only.
    StackTree!(Node, "a.key") tree;

    // De facto constructor (same convention as StackTree.opCall):  must be
    // called before use so the underlying tree is initialized.
    static typeof(this) opCall() {
        typeof(this) ret;
        ret.tree = typeof(tree)();
        return ret;
    }

    /**Looks up key in the table, returns it by reference.  If it does not
     * exist, it will be created and initialized to V.init.  This is handy,
     * for example, when counting things with integer types.
     */
    ref V opIndex(K key) {
        Node* result = key in tree;
        if(result is null) {
            // Insert a default-initialized slot, then look it up again to
            // get a stable pointer into the tree.
            tree.insert( Node(key, V.init));
            result = key in tree;
        }

        return result.value;
    }

    /**Assigns val to key, overwriting any existing entry.*/
    V opIndexAssign(V val, K key) {
        tree.insert( Node(key, val));
        return val;
    }

    /**Returns a pointer to the value stored under key, or null if absent.*/
    V* opIn_r(K key) {
        auto nodePtr = key in tree;
        if(nodePtr is null) {
            return null;
        }

        return &(nodePtr.value);
    }

    /**Removes the entry for key, if present.*/
    void remove(K key) {
        tree.remove(key);
    }

    /**Number of key/value pairs stored.*/
    @property size_t length() const pure nothrow {
        return tree.length;
    }

    /**Iterable view over the keys, in sorted order.*/
    TreeAaIter!( typeof(tree), "a.key") keys() @property {
        typeof(return) ret;
        ret.tree = tree;
        return ret;
    }

    // Projection used by values():  maps a tree node to its value slot.
    private static ref Unqual!(V) getVal(ref Node node) {
        return node.value;
    }

    /**Iterable view over the values, in key-sorted order.*/
    TreeAaIter!( typeof(tree), getVal) values() @property {
        return typeof(return)(tree);
    }

    /**Iterate over both the keys and values of this associative array.*/
    int opApply( int delegate(ref Unqual!(K), ref Unqual!(V)) dg) {
        alias typeof(*(tree.head)) TreeNode;
        int res;
        // In-order traversal; a non-zero delegate return aborts iteration.
        int opApplyImpl(TreeNode* node) {
            if(node is null) {
                return 0;
            }
            res = opApplyImpl(node.left);
            if(res) {
                return res;
            }
            res = dg(node.payload.key, node.payload.value);
            if(res) {
                return res;
            }
            res = opApplyImpl(node.right);
            return res;
        }

        return opApplyImpl(tree.head);
    }

}
2363
unittest {
    // Test against builtin AA on random data.
    {
        mixin(newFrame);
        alias StackTreeAA!(string, uint) mySh;
        auto data = mySh();
        // Basic insert / lookup / opIndex checks.
        data["foo"] = 1;
        data["bar"] = 2;
        data["baz"] = 3;
        data["waldo"] = 4;
        assert(!("foobar" in data));
        assert(*("foo" in data) == 1);
        assert(*("bar" in data) == 2);
        assert(*("baz" in data) == 3);
        assert(*("waldo" in data) == 4);
        assert(data["foo"] == 1);
        assert(data["bar"] == 2);
        assert(data["baz"] == 3);
        assert(data["waldo"] == 4);

        // keys/values iterators should cover exactly the inserted entries.
        assert(data.length == 4);
        auto myKeys = toArray(data.keys);
        qsort(myKeys);
        assert(myKeys == cast(string[]) ["bar", "baz", "foo", "waldo"]);
        auto myValues = toArray(data.values);
        qsort(myValues);
        assert(myValues == [1U, 2, 3, 4]);

        foreach(v; data.values) {
            assert(v > 0 && v < 5);
        }
    }

    alias StackTreeAA!(uint, uint) mySh2;
    {   // Test remove.
        mixin(newFrame);

        auto foo = mySh2();
        for(uint i = 0; i < 200; i++) {
            foo[i] = i;
        }
        assert(foo.length == 200);
        // Remove evens below 200, then everything from 20 up; odds below 20
        // should be all that survive.
        for(uint i = 0; i < 200; i += 2) {
            foo.remove(i);
        }
        foreach(i; 20..200) {
            foo.remove(i);
        }
        for(uint i = 0; i < 20; i++) {
            if(i & 1) {
                assert(i in foo);
                assert(*(i in foo) == i);
            } else {
                assert(!(i in foo));
            }
        }
        auto vals = toArray(foo.values);
        assert(foo.length == 10);
        assert(vals.qsort == [1U, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
    }

    { // Monte carlo unittesting against builtin hash table.
        mixin(newFrame);
        uint[uint] builtin;
        auto monteSh = mySh2();
        uint[] nums = newStack!uint(100_000);
        foreach(ref num; nums) {
            num = uniform(0U, uint.max);
        }

        // Randomly toggle entries in both tables and check they agree.
        foreach(i; 0..10_000) {
            auto index = uniform(0, cast(uint) nums.length);
            if(index in builtin) {
                assert(index in monteSh);
                assert(builtin[index] == nums[index]);
                assert(monteSh[index] == nums[index]);
                builtin.remove(index);
                monteSh.remove(index);
            } else {
                assert(!(index in monteSh));
                builtin[index] = nums[index];
                monteSh[index] = nums[index];
            }
        }

        assert(builtin.length == monteSh.length);
        foreach(k, v; builtin) {
            assert(k in monteSh);
            assert(*(k in builtin) == *(k in monteSh));
            assert(monteSh[k] == v);
        }

        // Make sure nothing is missed in iteration.  Since both keys and
        // values use the same struct, just with a few static if statements,
        // if it works for keys and simple tests work for values, it works.
        foreach(k; monteSh.keys) {
            builtin.remove(k);
        }
        assert(builtin.length == 0);
    }
}
Note: See TracBrowser for help on using the browser.