-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathHalideBuffer.h
1997 lines (1777 loc) · 72.4 KB
/
HalideBuffer.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/** \file
* Defines a Buffer type that wraps from buffer_t and adds
* functionality, and methods for more conveniently iterating over the
* samples in a buffer_t outside of Halide code. */
#ifndef HALIDE_RUNTIME_BUFFER_H
#define HALIDE_RUNTIME_BUFFER_H
#include <algorithm>
#include <atomic>
#include <cassert>
#include <limits>
#include <memory>
#include <new>
#include <vector>

#include <stdint.h>
#include <string.h>

#include "HalideRuntime.h"
#ifdef _MSC_VER
#define HALIDE_ALLOCA _alloca
#else
#define HALIDE_ALLOCA __builtin_alloca
#endif
// gcc 5.1 has a false positive warning on this code
#if __GNUC__ == 5 && __GNUC_MINOR__ == 1
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
namespace Halide {
namespace Runtime {
// Forward-declare our Buffer class
template<typename T, int D> class Buffer;
// A trait that is true iff every type in the parameter pack is
// implicitly convertible to int. Used with std::enable_if to select
// the integer-coordinate overloads of Buffer's constructors and
// accessors.
template<typename... Pack>
struct AllInts : std::false_type {};

// An empty pack is trivially all-ints.
template<>
struct AllInts<> : std::true_type {};

// Peel one type off the front of the pack and recurse on the rest.
template<typename Head, typename... Tail>
struct AllInts<Head, Tail...> {
    static const bool value = std::is_convertible<Head, int>::value && AllInts<Tail...>::value;
};

// float and double *are* implicitly int-convertible, but doing that
// conversion produces a warning we treat as an error, so reject them
// explicitly here.
template<typename... Tail>
struct AllInts<float, Tail...> : std::false_type {};

template<typename... Tail>
struct AllInts<double, Tail...> : std::false_type {};
/** A struct acting as a header for allocations owned by the Buffer
 * class itself. It sits at the start of the block returned by the
 * allocator; the (aligned) pixel data follows it (see
 * Buffer::allocate). */
struct AllocationHeader {
    // Called with the header pointer to release the whole block once
    // ref_count hits zero. Defaults to free() in Buffer::allocate.
    void (*deallocate_fn)(void *);
    // Number of Buffers sharing this allocation. Atomic so Buffers
    // can be copied/destroyed concurrently from multiple threads.
    std::atomic<int> ref_count {0};
};
/** This indicates how to deallocate the device for a Halide::Runtime::Buffer. */
enum struct BufferDeviceOwnership : int {
    Allocated,               ///< halide_device_free will be called when device ref count goes to zero
    WrappedNative,           ///< halide_device_detach_native will be called when device ref count goes to zero
    Unmanaged,               ///< No free routine will be called when device ref count goes to zero
    AllocatedDeviceAndHost,  ///< Call device_and_host_free when DeviceRefCount goes to zero.
};
/** A similar struct for managing device allocations. Heap-allocated
 * lazily, only once a Buffer actually owns a device allocation
 * (see Buffer::incref). */
struct DeviceRefCount {
    // This is only ever constructed when there's something to manage,
    // so start at one.
    std::atomic<int> count {1};
    // How the device allocation should be released when count reaches
    // zero (freed, detached, or left alone).
    BufferDeviceOwnership ownership{BufferDeviceOwnership::Allocated};
};
/** A templated Buffer class that wraps halide_buffer_t and adds
 * functionality. When using Halide from C++, this is the preferred
 * way to create input and output buffers. The overhead of using this
 * class relative to a naked halide_buffer_t is minimal - it uses another
 * ~16 bytes on the stack, and does no dynamic allocations when using
 * it to represent existing memory of a known maximum dimensionality.
 *
 * The template parameter T is the element type. For buffers where the
 * element type is unknown, or may vary, use void or const void.
 *
 * D is the maximum number of dimensions that can be represented using
 * space inside the class itself. Set it to the maximum dimensionality
 * you expect this buffer to be. If the actual dimensionality exceeds
 * this, heap storage is allocated to track the shape of the buffer. D
 * defaults to 4, which should cover nearly all usage.
 *
 * The class optionally allocates and owns memory for the image using
 * a shared pointer allocated with the provided allocator. If they are
 * null, malloc and free are used. Any device-side allocation is
 * considered as owned if and only if the host-side allocation is
 * owned. */
template<typename T = void, int D = 4>
class Buffer {
    /** The underlying buffer_t. Zero-initialized, so a
     * default-constructed Buffer has no host/device pointers. */
    halide_buffer_t buf = {0};

    /** Some in-class storage for shape of the dimensions. Used
     * whenever dimensionality <= D; otherwise buf.dim points to a
     * heap array (see make_shape_storage). */
    halide_dimension_t shape[D];

    /** The allocation owned by this Buffer. NULL if the Buffer does not
     * own the memory. */
    AllocationHeader *alloc = nullptr;

    /** A reference count for the device allocation owned by this
     * buffer. Mutable because incref() must be callable on const
     * Buffers. */
    mutable DeviceRefCount *dev_ref_count = nullptr;

    /** True if T is of type void or const void */
    static const bool T_is_void = std::is_same<typename std::remove_const<T>::type, void>::value;

    /** A type function that adds a const qualifier if T is a const type. */
    template<typename T2>
    using add_const_if_T_is_const = typename std::conditional<std::is_const<T>::value, const T2, T2>::type;

    /** T unless T is (const) void, in which case (const)
     * uint8_t. Useful for providing return types for operator() */
    using not_void_T = typename std::conditional<T_is_void,
                                                 add_const_if_T_is_const<uint8_t>,
                                                 T>::type;

    /** The type the elements are stored as. Equal to not_void_T
     * unless T is a pointer, in which case uint64_t. Halide stores
     * all pointer types as uint64s internally, even on 32-bit
     * systems. */
    using storage_T = typename std::conditional<std::is_pointer<T>::value, uint64_t, not_void_T>::type;
public:
    /** True if the Halide type is not void (or const void). */
    static constexpr bool has_static_halide_type = !T_is_void;

    /** Get the Halide type of T. Callers should not use the result if
     * has_static_halide_type is false. */
    static halide_type_t static_halide_type() {
        return halide_type_of<typename std::remove_cv<not_void_T>::type>();
    }

    /** Does this Buffer own the host memory it refers to? True iff an
     * AllocationHeader was created by allocate(). */
    bool owns_host_memory() const {
        return alloc != nullptr;
    }
private:
    /** Increment the reference count of any owned allocation (host
     * and/or device). const (and dev_ref_count mutable) so that
     * copying a const Buffer still bumps the counts. */
    void incref() const {
        if (owns_host_memory()) {
            alloc->ref_count++;
        }
        if (buf.device) {
            if (!dev_ref_count) {
                // I seem to have a non-zero dev field but no
                // reference count for it. I must have been given a
                // device allocation by a Halide pipeline, and have
                // never been copied from since. Take sole ownership
                // of it (DeviceRefCount starts at 1).
                dev_ref_count = new DeviceRefCount;
            }
            dev_ref_count->count++;
        }
    }
    /** Decrement the reference count of any owned allocation and free host
     * and device memory if it hits zero. Sets alloc to nullptr. */
    void decref() {
        if (owns_host_memory()) {
            int new_count = --(alloc->ref_count);
            if (new_count == 0) {
                // Copy the deallocator out of the header before the
                // call destroys the header itself.
                void (*fn)(void *) = alloc->deallocate_fn;
                fn(alloc);
            }
            buf.host = nullptr;
            alloc = nullptr;
            set_host_dirty(false);
        }
        decref_dev();
    }

    /** Drop this Buffer's reference to any device allocation,
     * releasing it per the recorded ownership policy if this was the
     * last reference. Note: dev_ref_count == nullptr with a non-null
     * buf.device means we are the sole (implicit) owner, so new_count
     * stays 0 and the allocation is freed below. */
    void decref_dev() {
        int new_count = 0;
        if (dev_ref_count) {
            new_count = --(dev_ref_count->count);
        }
        if (new_count == 0) {
            if (buf.device) {
                assert(!(alloc && device_dirty()) &&
                       "Implicitly freeing a dirty device allocation while a host allocation still lives. "
                       "Call device_free explicitly if you want to drop dirty device-side data. "
                       "Call copy_to_host explicitly if you want the data copied to the host allocation "
                       "before the device allocation is freed.");
                // Dispatch on the ownership policy recorded when the
                // device allocation was acquired.
                if (dev_ref_count && dev_ref_count->ownership == BufferDeviceOwnership::WrappedNative) {
                    buf.device_interface->detach_native(nullptr, &buf);
                } else if (dev_ref_count && dev_ref_count->ownership == BufferDeviceOwnership::AllocatedDeviceAndHost) {
                    buf.device_interface->device_and_host_free(nullptr, &buf);
                } else if (dev_ref_count == nullptr || dev_ref_count->ownership == BufferDeviceOwnership::Allocated) {
                    buf.device_interface->device_free(nullptr, &buf);
                }
            }
            if (dev_ref_count) {
                delete dev_ref_count;
            }
        }
        // Whether or not anything was freed, this Buffer no longer
        // refers to a device allocation.
        buf.device = 0;
        buf.device_interface = nullptr;
        dev_ref_count = nullptr;
    }
    /** Release heap-allocated shape storage, if any. A no-op when
     * buf.dim points at the in-class shape array. */
    void free_shape_storage() {
        if (buf.dim != shape) {
            delete[] buf.dim;
            buf.dim = nullptr;
        }
    }

    /** Point buf.dim at storage big enough for buf.dimensions
     * entries: the in-class array when it fits, else a heap array
     * (freed later by free_shape_storage). */
    void make_shape_storage() {
        if (buf.dimensions <= D) {
            buf.dim = shape;
        } else {
            buf.dim = new halide_dimension_t[buf.dimensions];
        }
    }
void copy_shape_from(const halide_buffer_t &other) {
// All callers of this ensure that buf.dimensions == other.dimensions.
make_shape_storage();
for (int i = 0; i < buf.dimensions; i++) {
buf.dim[i] = other.dim[i];
}
}
    /** Take over another Buffer's shape storage. If the other Buffer
     * was using its in-class array (whose capacity D2 may differ from
     * ours), the entries must be copied; otherwise we can simply
     * steal its heap pointer. */
    template<typename T2, int D2>
    void move_shape_from(Buffer<T2, D2> &&other) {
        if (other.shape == other.buf.dim) {
            copy_shape_from(other.buf);
        } else {
            buf.dim = other.buf.dim;
            other.buf.dim = nullptr;
        }
    }

    /** Initialize this Buffer from a halide_buffer_t: copy the struct
     * and its shape, and record how any device allocation it carries
     * should eventually be released. */
    void initialize_from_buffer(const halide_buffer_t &b,
                                BufferDeviceOwnership ownership) {
        memcpy(&buf, &b, sizeof(halide_buffer_t));
        copy_shape_from(b);
        if (b.device) {
            dev_ref_count = new DeviceRefCount;
            dev_ref_count->ownership = ownership;
        }
    }
    /** Initialize the shape from a parameter pack of ints. Each
     * dimension gets a min of zero and dense row-major strides
     * (dimension 0 is innermost). */
    template<typename ...Args>
    void initialize_shape(int next, int first, Args... rest) {
        buf.dim[next].min = 0;
        buf.dim[next].extent = first;
        if (next == 0) {
            buf.dim[next].stride = 1;
        } else {
            // Dense packing: each stride is the previous stride times
            // the previous extent.
            buf.dim[next].stride = buf.dim[next-1].stride * buf.dim[next-1].extent;
        }
        initialize_shape(next + 1, rest...);
    }

    /** Base case for the template recursion above. */
    void initialize_shape(int) {
    }
/** Initialize the shape from a vector of extents */
void initialize_shape(const std::vector<int> &sizes) {
assert(sizes.size() <= std::numeric_limits<int>::max());
int limit = (int)sizes.size();
assert(limit <= dimensions());
for (int i = 0; i < limit; i++) {
buf.dim[i].min = 0;
buf.dim[i].extent = sizes[i];
if (i == 0) {
buf.dim[i].stride = 1;
} else {
buf.dim[i].stride = buf.dim[i-1].stride * buf.dim[i-1].extent;
}
}
}
    /** Initialize the shape from the static shape of a (possibly
     * nested) C array. Recurses from the outermost dimension (index
     * next) inwards; strides are computed after the recursive call so
     * that the inner dimension's stride already exists. */
    template<typename Array, size_t N>
    void initialize_shape_from_array_shape(int next, Array (&vals)[N]) {
        buf.dim[next].min = 0;
        buf.dim[next].extent = (int)N;
        if (next == 0) {
            buf.dim[next].stride = 1;
        } else {
            initialize_shape_from_array_shape(next - 1, vals[0]);
            buf.dim[next].stride = buf.dim[next - 1].stride * buf.dim[next - 1].extent;
        }
    }

    /** Base case for the template recursion above: a scalar element
     * contributes no dimensions. */
    template<typename T2>
    void initialize_shape_from_array_shape(int, const T2 &) {
    }

    /** Get the dimensionality of a multi-dimensional C array by
     * recursing into its element type. */
    template<typename Array, size_t N>
    static int dimensionality_of_array(Array (&vals)[N]) {
        return dimensionality_of_array(vals[0]) + 1;
    }

    /** Base case: a non-array scalar has zero dimensions. */
    template<typename T2>
    static int dimensionality_of_array(const T2 &) {
        return 0;
    }

    /** Get the underlying halide_type_t of an array's element type by
     * recursing down to the scalar. */
    template<typename Array, size_t N>
    static halide_type_t scalar_type_of_array(Array (&vals)[N]) {
        return scalar_type_of_array(vals[0]);
    }

    /** Base case: map the scalar type to its halide_type_t. */
    template<typename T2>
    static halide_type_t scalar_type_of_array(const T2 &) {
        return halide_type_of<typename std::remove_cv<T2>::type>();
    }
/** Check if any args in a parameter pack are zero */
template<typename ...Args>
static bool any_zero(int first, Args... rest) {
if (first == 0) return true;
return any_zero(rest...);
}
static bool any_zero() {
return false;
}
static bool any_zero(const std::vector<int> &v) {
for (int i : v) {
if (i == 0) return true;
}
return false;
}
public:
    typedef T ElemType;

    /** Read-only access to the shape of one dimension of the buffer.
     * Holds a reference into the Buffer's shape storage, so it must
     * not outlive the Buffer it came from. */
    class Dimension {
        const halide_dimension_t &d;
    public:
        /** The lowest coordinate in this dimension */
        HALIDE_ALWAYS_INLINE int min() const {
            return d.min;
        }

        /** The number of elements in memory you have to step over to
         * increment this coordinate by one. */
        HALIDE_ALWAYS_INLINE int stride() const {
            return d.stride;
        }

        /** The extent of the image along this dimension */
        HALIDE_ALWAYS_INLINE int extent() const {
            return d.extent;
        }

        /** The highest coordinate in this dimension */
        HALIDE_ALWAYS_INLINE int max() const {
            return min() + extent() - 1;
        }

        /** An iterator class, so that you can iterate over
         * coordinates in a dimension using a range-based for loop. */
        struct iterator {
            int val;
            int operator*() const {return val;}
            bool operator!=(const iterator &other) const {return val != other.val;}
            iterator &operator++() {val++; return *this;}
        };

        /** An iterator that points to the min coordinate */
        HALIDE_ALWAYS_INLINE iterator begin() const {
            return {min()};
        }

        /** An iterator that points to one past the max coordinate */
        HALIDE_ALWAYS_INLINE iterator end() const {
            return {min() + extent()};
        }

        // Note: the original had a stray ';' after this constructor
        // body, which triggers -Wextra-semi in pedantic builds.
        Dimension(const halide_dimension_t &dim) : d(dim) {}
    };
    /** Access the shape of the buffer. No bounds check is performed
     * on i; it must be in [0, dimensions()). */
    HALIDE_ALWAYS_INLINE Dimension dim(int i) const {
        return Dimension(buf.dim[i]);
    }

    /** Access to the mins, strides, extents. Will be deprecated. Do not use. */
    // @{
    int min(int i) const { return dim(i).min(); }
    int extent(int i) const { return dim(i).extent(); }
    int stride(int i) const { return dim(i).stride(); }
    // @}

    /** The total number of elements this buffer represents. Equal to
     * the product of the extents */
    size_t number_of_elements() const {
        size_t s = 1;
        for (int i = 0; i < dimensions(); i++) {
            s *= dim(i).extent();
        }
        return s;
    }

    /** Get the dimensionality of the buffer. */
    int dimensions() const {
        return buf.dimensions;
    }

    /** Get the type of the elements. */
    halide_type_t type() const {
        return buf.type;
    }
    /** A pointer to the element with the lowest address. If all
     * strides are positive, equal to the host pointer. A negative
     * stride means that dimension walks backwards through memory, so
     * step to its last coordinate. */
    T *begin() const {
        ptrdiff_t index = 0;
        for (int i = 0; i < dimensions(); i++) {
            if (dim(i).stride() < 0) {
                index += dim(i).stride() * (dim(i).extent() - 1);
            }
        }
        return (T *)(buf.host + index * type().bytes());
    }

    /** A pointer to one beyond the element with the highest address. */
    T *end() const {
        ptrdiff_t index = 0;
        for (int i = 0; i < dimensions(); i++) {
            if (dim(i).stride() > 0) {
                index += dim(i).stride() * (dim(i).extent() - 1);
            }
        }
        // One past the highest-addressed element.
        index += 1;
        return (T *)(buf.host + index * type().bytes());
    }

    /** The total number of bytes spanned by the data in memory
     * (begin() to end(), which may exceed number_of_elements() *
     * type().bytes() if the layout has holes). */
    size_t size_in_bytes() const {
        return (size_t)((const uint8_t *)end() - (const uint8_t *)begin());
    }
    /** Default constructor: a zero-dimensional Buffer with no
     * allocation. The type field is T's type (arbitrary if T is
     * void). */
    Buffer() {
        buf.type = static_halide_type();
        make_shape_storage();
    }

    /** Make a Buffer from a halide_buffer_t. Note the parameter
     * shadows the member; initialize_from_buffer copies it in. */
    Buffer(const halide_buffer_t &buf,
           BufferDeviceOwnership ownership = BufferDeviceOwnership::Unmanaged) {
        assert(T_is_void || buf.type == static_halide_type());
        initialize_from_buffer(buf, ownership);
    }

    /** Make a Buffer from a legacy buffer_t. Requires a statically
     * typed Buffer (buffer_t carries only an element size, not a
     * type) and no device allocation. */
    Buffer(const buffer_t &old_buf) {
        assert(!T_is_void && old_buf.elem_size == static_halide_type().bytes());
        buf.host = old_buf.host;
        buf.type = static_halide_type();
        // Count the dimensions: legacy buffer_t has exactly 4 slots,
        // with unused trailing ones having zero extent.
        int d;
        for (d = 0; d < 4 && old_buf.extent[d]; d++);
        buf.dimensions = d;
        make_shape_storage();
        for (int i = 0; i < d; i++) {
            buf.dim[i].min = old_buf.min[i];
            buf.dim[i].extent = old_buf.extent[i];
            buf.dim[i].stride = old_buf.stride[i];
        }
        buf.set_host_dirty(old_buf.host_dirty);
        assert(old_buf.dev == 0 && "Cannot construct a Halide::Runtime::Buffer from a legacy buffer_t with a device allocation. Use halide_upgrade_buffer_t to upgrade it to a halide_buffer_t first.");
    }

    /** Populate the fields of a legacy buffer_t using this
     * Buffer. Does not copy device metadata. Asserts that there is no
     * device allocation and at most four dimensions. */
    buffer_t make_legacy_buffer_t() const {
        buffer_t old_buf = {0};
        assert(!has_device_allocation() && "Cannot construct a legacy buffer_t from a Halide::Runtime::Buffer with a device allocation. Use halide_downgrade_buffer_t instead.");
        old_buf.host = buf.host;
        old_buf.elem_size = buf.type.bytes();
        assert(dimensions() <= 4 && "Cannot construct a legacy buffer_t from a Halide::Runtime::Buffer with more than four dimensions.");
        for (int i = 0; i < dimensions(); i++) {
            old_buf.min[i] = dim(i).min();
            old_buf.extent[i] = dim(i).extent();
            old_buf.stride[i] = dim(i).stride();
        }
        return old_buf;
    }
    /** Give Buffers access to the members of Buffers of different dimensionalities and types. */
    template<typename T2, int D2> friend class Buffer;

    /** Determine if a Buffer<T, D> can be constructed from some other Buffer type.
     * If this can be determined at compile time, fail with a static assert; otherwise
     * return a boolean based on runtime typing. */
    template<typename T2, int D2>
    static bool can_convert_from(const Buffer<T2, D2> &other) {
        static_assert((!std::is_const<T2>::value || std::is_const<T>::value),
                      "Can't convert from a Buffer<const T> to a Buffer<T>");
        static_assert(std::is_same<typename std::remove_const<T>::type,
                                   typename std::remove_const<T2>::type>::value ||
                      T_is_void || Buffer<T2, D2>::T_is_void,
                      "type mismatch constructing Buffer");
        // void -> non-void needs a runtime type check; everything else
        // that passed the static_asserts is fine.
        if (Buffer<T2, D2>::T_is_void && !T_is_void) {
            return other.type() == static_halide_type();
        }
        return true;
    }

    /** Fail an assertion at runtime or compile-time if a Buffer<T, D>
     * cannot be constructed from some other Buffer type. */
    template<typename T2, int D2>
    static void assert_can_convert_from(const Buffer<T2, D2> &other) {
        assert(can_convert_from(other));
    }
    /** Copy constructor. Does not copy underlying data; the two
     * Buffers share it via the reference counts. */
    Buffer(const Buffer<T, D> &other) : buf(other.buf),
                                        alloc(other.alloc) {
        // incref may lazily create other's dev_ref_count, so it must
        // run before we copy that pointer.
        other.incref();
        dev_ref_count = other.dev_ref_count;
        copy_shape_from(other.buf);
    }

    /** Construct a Buffer from a Buffer of different dimensionality
     * and type. Asserts that the type matches (at runtime, if one of
     * the types is void). Note that this constructor is
     * implicit. This, for example, lets you pass things like
     * Buffer<T> or Buffer<const void> to functions expected
     * Buffer<const T>. */
    template<typename T2, int D2>
    Buffer(const Buffer<T2, D2> &other) : buf(other.buf),
                                          alloc(other.alloc) {
        assert_can_convert_from(other);
        other.incref();
        dev_ref_count = other.dev_ref_count;
        copy_shape_from(other.buf);
    }

    /** Move constructor. Steals the other Buffer's references and
     * shape storage; no ref-count traffic. */
    Buffer(Buffer<T, D> &&other) : buf(other.buf),
                                   alloc(other.alloc),
                                   dev_ref_count(other.dev_ref_count) {
        // Null out the source so its destructor releases nothing.
        other.dev_ref_count = nullptr;
        other.alloc = nullptr;
        other.buf.device = 0;
        other.buf.device_interface = nullptr;
        move_shape_from(std::forward<Buffer<T, D>>(other));
    }

    /** Move-construct a Buffer from a Buffer of different
     * dimensionality and type. Asserts that the types match (at
     * runtime if one of the types is void). */
    template<typename T2, int D2>
    Buffer(Buffer<T2, D2> &&other) : buf(other.buf),
                                     alloc(other.alloc),
                                     dev_ref_count(other.dev_ref_count) {
        other.dev_ref_count = nullptr;
        other.alloc = nullptr;
        other.buf.device = 0;
        other.buf.device_interface = nullptr;
        move_shape_from(std::forward<Buffer<T2, D2>>(other));
    }
    /** Assign from another Buffer of possibly-different
     * dimensionality and type. Asserts that the types match (at
     * runtime if one of the types is void). */
    template<typename T2, int D2>
    Buffer<T, D> &operator=(const Buffer<T2, D2> &other) {
        // Compare as void* since the two Buffers are distinct types.
        if ((const void *)this == (const void *)&other) {
            return *this;
        }
        assert_can_convert_from(other);
        // incref before decref so a shared allocation can't hit zero
        // in between.
        other.incref();
        decref();
        dev_ref_count = other.dev_ref_count;
        alloc = other.alloc;
        free_shape_storage();
        buf = other.buf;
        copy_shape_from(other.buf);
        return *this;
    }

    /** Standard assignment operator */
    Buffer<T, D> &operator=(const Buffer<T, D> &other) {
        if (this == &other) {
            return *this;
        }
        // incref before decref so a shared allocation can't hit zero
        // in between.
        other.incref();
        decref();
        dev_ref_count = other.dev_ref_count;
        alloc = other.alloc;
        free_shape_storage();
        buf = other.buf;
        copy_shape_from(other.buf);
        return *this;
    }

    /** Move from another Buffer of possibly-different
     * dimensionality and type. Asserts that the types match (at
     * runtime if one of the types is void). */
    template<typename T2, int D2>
    Buffer<T, D> &operator=(Buffer<T2, D2> &&other) {
        assert_can_convert_from(other);
        decref();
        alloc = other.alloc;
        other.alloc = nullptr;
        dev_ref_count = other.dev_ref_count;
        other.dev_ref_count = nullptr;
        free_shape_storage();
        buf = other.buf;
        other.buf.device = 0;
        other.buf.device_interface = nullptr;
        move_shape_from(std::forward<Buffer<T2, D2>>(other));
        return *this;
    }
/** Standard move-assignment operator */
Buffer<T, D> &operator=(Buffer<T, D> &&other) {
decref();
alloc = other.alloc;
other.alloc = nullptr;
dev_ref_count = other.dev_ref_count;
other.dev_ref_count = nullptr;
free_shape_storage();
buf = other.buf;
other.buf.device = 0;
other.buf.device_interface = nullptr;
move_shape_from(std::forward<Buffer<T, D>>(other));
return *this;
}
    /** Check the product of the extents fits in memory. The check
     * works by multiplying all extents into size (letting it wrap on
     * overflow), then dividing them back out; if the round trip does
     * not recover the element size, some multiply overflowed. */
    void check_overflow() {
        size_t size = type().bytes();
        for (int i = 0; i < dimensions(); i++) {
            size *= dim(i).extent();
        }
        // We allow 2^31 or 2^63 bytes, so drop the top bit.
        size = (size << 1) >> 1;
        for (int i = 0; i < dimensions(); i++) {
            size /= dim(i).extent();
        }
        assert(size == (size_t)type().bytes() && "Error: Overflow computing total size of buffer.");
    }
/** Allocate memory for this Buffer. Drops the reference to any
* owned memory. */
void allocate(void *(*allocate_fn)(size_t) = nullptr,
void (*deallocate_fn)(void *) = nullptr) {
if (!allocate_fn) {
allocate_fn = malloc;
}
if (!deallocate_fn) {
deallocate_fn = free;
}
// Drop any existing allocation
deallocate();
// Conservatively align images to 128 bytes. This is enough
// alignment for all the platforms we might use.
size_t size = size_in_bytes();
const size_t alignment = 128;
size = (size + alignment - 1) & ~(alignment - 1);
alloc = (AllocationHeader *)allocate_fn(size + sizeof(AllocationHeader) + alignment - 1);
alloc->deallocate_fn = deallocate_fn;
alloc->ref_count = 1;
uint8_t *unaligned_ptr = ((uint8_t *)alloc) + sizeof(AllocationHeader);
buf.host = (uint8_t *)((uintptr_t)(unaligned_ptr + alignment - 1) & ~(alignment - 1));
}
    /** Drop reference to any owned host or device memory, possibly
     * freeing it, if this buffer held the last reference to
     * it. Retains the shape of the buffer. Does nothing if this
     * buffer did not allocate its own memory. */
    void deallocate() {
        decref();
    }

    /** Drop reference to any owned device memory, possibly freeing it
     * if this buffer held the last reference to it. Asserts that
     * device_dirty is false. */
    void device_deallocate() {
        decref_dev();
    }
    /** Allocate a new image of the given size with a runtime
     * type. Only used when you do know what size you want but you
     * don't know statically what type the elements are. Pass zeroes
     * to make a buffer suitable for bounds query calls. */
    template<typename ...Args,
             typename = typename std::enable_if<AllInts<Args...>::value>::type>
    Buffer(halide_type_t t, int first, Args... rest) {
        if (!T_is_void) {
            assert(static_halide_type() == t);
        }
        buf.type = t;
        buf.dimensions = 1 + (int)(sizeof...(rest));
        make_shape_storage();
        initialize_shape(0, first, rest...);
        // A zero extent anywhere means a bounds-query buffer: no
        // allocation.
        if (!any_zero(first, rest...)) {
            check_overflow();
            allocate();
        }
    }

    /** Allocate a new image of the given size. Pass zeroes to make a
     * buffer suitable for bounds query calls. */
    // @{

    // The overload with one argument is 'explicit', so that
    // (say) int is not implicitly convertable to Buffer<int>
    explicit Buffer(int first) {
        static_assert(!T_is_void,
                      "To construct an Buffer<void>, pass a halide_type_t as the first argument to the constructor");
        buf.type = static_halide_type();
        buf.dimensions = 1;
        make_shape_storage();
        initialize_shape(0, first);
        if (first != 0) {
            check_overflow();
            allocate();
        }
    }

    template<typename ...Args,
             typename = typename std::enable_if<AllInts<Args...>::value>::type>
    Buffer(int first, int second, Args... rest) {
        static_assert(!T_is_void,
                      "To construct an Buffer<void>, pass a halide_type_t as the first argument to the constructor");
        buf.type = static_halide_type();
        buf.dimensions = 2 + (int)(sizeof...(rest));
        make_shape_storage();
        initialize_shape(0, first, second, rest...);
        if (!any_zero(first, second, rest...)) {
            check_overflow();
            allocate();
        }
    }
    // @}

    /** Allocate a new image of unknown type using a vector of ints as the size. */
    Buffer(halide_type_t t, const std::vector<int> &sizes) {
        if (!T_is_void) {
            assert(static_halide_type() == t);
        }
        buf.type = t;
        buf.dimensions = (int)sizes.size();
        make_shape_storage();
        initialize_shape(sizes);
        if (!any_zero(sizes)) {
            check_overflow();
            allocate();
        }
    }

    /** Allocate a new image of known type using a vector of ints as the size. */
    Buffer(const std::vector<int> &sizes) {
        buf.type = static_halide_type();
        buf.dimensions = (int)sizes.size();
        make_shape_storage();
        initialize_shape(sizes);
        if (!any_zero(sizes)) {
            check_overflow();
            allocate();
        }
    }
    /** Make a Buffer that refers to a statically sized array. Does not
     * take ownership of the data, and does not set the host_dirty flag.
     * Dimensionality and element type are deduced from the array type. */
    template<typename Array, size_t N>
    explicit Buffer(Array (&vals)[N]) {
        buf.dimensions = dimensionality_of_array(vals);
        buf.type = scalar_type_of_array(vals);
        buf.host = (uint8_t *)vals;
        make_shape_storage();
        initialize_shape_from_array_shape(buf.dimensions - 1, vals);
    }

    /** Initialize a Buffer of runtime type from a pointer and some
     * sizes. Assumes dense row-major packing and a min coordinate of
     * zero. Does not take ownership of the data and does not set the
     * host_dirty flag. */
    template<typename ...Args,
             typename = typename std::enable_if<AllInts<Args...>::value>::type>
    explicit Buffer(halide_type_t t, add_const_if_T_is_const<void> *data, int first, Args&&... rest) {
        if (!T_is_void) {
            assert(static_halide_type() == t);
        }
        buf.type = t;
        buf.dimensions = 1 + (int)(sizeof...(rest));
        buf.host = (uint8_t *)data;
        make_shape_storage();
        initialize_shape(0, first, int(rest)...);
    }

    /** Initialize a Buffer from a pointer and some sizes. Assumes
     * dense row-major packing and a min coordinate of zero. Does not
     * take ownership of the data and does not set the host_dirty flag. */
    template<typename ...Args,
             typename = typename std::enable_if<AllInts<Args...>::value>::type>
    explicit Buffer(T *data, int first, Args&&... rest) {
        buf.type = static_halide_type();
        buf.dimensions = 1 + (int)(sizeof...(rest));
        buf.host = (uint8_t *)data;
        make_shape_storage();
        initialize_shape(0, first, int(rest)...);
    }

    /** Initialize a Buffer from a pointer and a vector of
     * sizes. Assumes dense row-major packing and a min coordinate of
     * zero. Does not take ownership of the data and does not set the
     * host_dirty flag. */
    explicit Buffer(T *data, const std::vector<int> &sizes) {
        buf.type = static_halide_type();
        buf.dimensions = (int)sizes.size();
        buf.host = (uint8_t *)data;
        make_shape_storage();
        initialize_shape(sizes);
    }

    /** Initialize a Buffer of runtime type from a pointer and a
     * vector of sizes. Assumes dense row-major packing and a min
     * coordinate of zero. Does not take ownership of the data and
     * does not set the host_dirty flag. */
    explicit Buffer(halide_type_t t, add_const_if_T_is_const<void> *data, const std::vector<int> &sizes) {
        if (!T_is_void) {
            assert(static_halide_type() == t);
        }
        buf.type = t;
        buf.dimensions = (int)sizes.size();
        buf.host = (uint8_t *)data;
        make_shape_storage();
        initialize_shape(sizes);
    }

    /** Initialize a Buffer from a pointer to the min coordinate and
     * an array describing the shape. Does not take ownership of the
     * data, and does not set the host_dirty flag. */
    explicit Buffer(halide_type_t t, add_const_if_T_is_const<void> *data, int d, const halide_dimension_t *shape) {
        if (!T_is_void) {
            assert(static_halide_type() == t);
        }
        buf.type = t;
        buf.dimensions = d;
        buf.host = (uint8_t *)data;
        make_shape_storage();
        for (int i = 0; i < d; i++) {
            buf.dim[i] = shape[i];
        }
    }

    /** Initialize a Buffer from a pointer to the min coordinate and
     * an array describing the shape. Does not take ownership of the
     * data and does not set the host_dirty flag. */
    explicit Buffer(T *data, int d, const halide_dimension_t *shape) {
        buf.type = halide_type_of<typename std::remove_cv<T>::type>();
        buf.dimensions = d;
        buf.host = (uint8_t *)data;
        make_shape_storage();
        for (int i = 0; i < d; i++) {
            buf.dim[i] = shape[i];
        }
    }
    /** Destructor. Will release any underlying owned allocation if
     * this is the last reference to it. Will assert fail if there are
     * weak references to this Buffer outstanding. */
    ~Buffer() {
        free_shape_storage();
        decref();
    }

    /** Get a pointer to the raw halide_buffer_t this wraps. */
    // @{
    halide_buffer_t *raw_buffer() {
        return &buf;
    }

    const halide_buffer_t *raw_buffer() const {
        return &buf;
    }
    // @}

    /** Provide a cast operator to halide_buffer_t *, so that
     * instances can be passed directly to Halide filters. */
    operator halide_buffer_t *() {
        return &buf;
    }
    /** Return a typed reference to this Buffer. Useful for converting
     * a reference to a Buffer<void> to a reference to, for example, a
     * Buffer<const uint8_t>. Does a runtime assert if the source
     * buffer type is void. The pointer cast relies on Buffer<T, D>
     * having the same layout for every T (with D2 <= D). */
    template<typename T2, int D2 = D,
             typename = typename std::enable_if<(D2 <= D)>::type>
    Buffer<T2, D2> &as() & {
        Buffer<T2, D>::assert_can_convert_from(*this);
        return *((Buffer<T2, D2> *)this);
    }

    /** Return a const typed reference to this Buffer. Useful for
     * converting a const reference to one Buffer type to a const
     * reference to another Buffer type. Does a runtime assert if the
     * source buffer type is void. */
    template<typename T2, int D2 = D,
             typename = typename std::enable_if<(D2 <= D)>::type>
    const Buffer<T2, D2> &as() const & {
        Buffer<T2, D>::assert_can_convert_from(*this);
        return *((const Buffer<T2, D2> *)this);
    }

    /** Returns this rval Buffer with a different type attached. Does
     * a dynamic type check if the source type is void. */
    template<typename T2, int D2 = D>
    Buffer<T2, D2> as() && {
        Buffer<T2, D2>::assert_can_convert_from(*this);
        return *((Buffer<T2, D2> *)this);
    }
    /** Conventional names for the first three dimensions. Each
     * defaults to 1 when the buffer has fewer dimensions. */
    // @{
    int width() const {
        return (dimensions() > 0) ? dim(0).extent() : 1;
    }

    int height() const {
        return (dimensions() > 1) ? dim(1).extent() : 1;
    }

    int channels() const {
        return (dimensions() > 2) ? dim(2).extent() : 1;
    }
    // @}

    /** Conventional names for the min and max value of each dimension.
     * Unlike width()/height(), these do not check dimensionality. */
    // @{
    int left() const {
        return dim(0).min();
    }

    int right() const {
        return dim(0).max();
    }

    int top() const {
        return dim(1).min();
    }

    int bottom() const {
        return dim(1).max();
    }
    // @}

    /** Make a new image which is a deep copy of this image. Use crop
     * or slice followed by copy to make a copy of only a portion of
     * the image. The new image uses the same memory layout as the
     * original, with holes compacted away. */
    Buffer<T, D> copy(void *(*allocate_fn)(size_t) = nullptr,
                      void (*deallocate_fn)(void *) = nullptr) const {
        Buffer<T, D> dst = make_with_shape_of(*this, allocate_fn, deallocate_fn);
        dst.copy_from(*this);
        return dst;
    }
/** Fill a Buffer with the values at the same coordinates in
* another Buffer. Restricts itself to coordinates contained
* within the intersection of the two buffers. If the two Buffers
* are not in the same coordinate system, you will need to
* translate the argument Buffer first. E.g. if you're blitting a
* sprite onto a framebuffer, you'll want to translate the sprite
* to the correct location first like so: \code
* framebuffer.copy_from(sprite.translated({x, y})); \endcode
*/
template<typename T2, int D2>
void copy_from(const Buffer<T2, D2> &other) {
assert(!device_dirty() && "Cannot call Halide::Runtime::Buffer::copy_from on a device dirty destination.");
assert(!other.device_dirty() && "Cannot call Halide::Runtime::Buffer::copy_from on a device dirty source.");
Buffer<const T, D> src(other);