1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
|
#include "intel_be_batchbuffer.h"
#include "intel_be_context.h"
#include "intel_be_device.h"
#include "intel_be_fence.h"
#include <errno.h>
#include "util/u_memory.h"
/**
 * Allocate and initialize a new batchbuffer wrapper.
 *
 * Allocates the CPU-side staging buffer (device->max_batch_size bytes)
 * and calls intel_be_batchbuffer_reset() to obtain the initial GEM
 * buffer object.
 *
 * Returns NULL on allocation failure (previously this dereferenced the
 * NULL result and crashed).  Caller owns the result and must release it
 * with intel_be_batchbuffer_free().
 */
struct intel_be_batchbuffer *
intel_be_batchbuffer_alloc(struct intel_be_context *intel)
{
   struct intel_be_batchbuffer *batch = CALLOC_STRUCT(intel_be_batchbuffer);

   if (!batch)
      return NULL;

   batch->base.buffer = NULL;
   batch->base.winsys = &intel->base;
   batch->base.size = 0;
   batch->base.actual_size = intel->device->max_batch_size;
   batch->base.relocs = 0;
   batch->base.max_relocs = INTEL_DEFAULT_RELOCS;

   /* CPU-side staging area; commands are built here and uploaded to the
    * GEM buffer object at flush time via drm_intel_bo_subdata().
    */
   batch->base.map = malloc(batch->base.actual_size);
   if (!batch->base.map) {
      FREE(batch);
      return NULL;
   }
   memset(batch->base.map, 0, batch->base.actual_size);
   batch->base.ptr = batch->base.map;

   intel_be_batchbuffer_reset(batch);

   return batch;
}
/**
 * Reset a batchbuffer for reuse.
 *
 * Drops the reference to the previous GEM buffer object (if any),
 * zeroes the CPU staging area, rewinds the write pointer and relocation
 * count, and allocates a fresh buffer object from the device's GEM pool.
 */
void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
{
   struct intel_be_device *device =
      intel_be_context(batch->base.winsys)->device;

   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);

   /* Rewind the CPU-side staging buffer. */
   memset(batch->base.map, 0, batch->base.actual_size);
   batch->base.ptr = batch->base.map;

   /* Reserve tail space for the MI_FLUSH / MI_BATCH_BUFFER_END that
    * intel_be_batchbuffer_flush() appends.
    */
   batch->base.size = batch->base.actual_size - BATCH_RESERVED;

   batch->base.relocs = 0;
   batch->base.max_relocs = INTEL_DEFAULT_RELOCS;

   batch->bo = drm_intel_bo_alloc(device->pools.gem,
                                  "gallium3d_batch_buffer",
                                  batch->base.actual_size, 0);
}
/**
 * Emit a relocation entry at the current batch write position.
 *
 * Records (via libdrm) that the dword about to be written at the current
 * offset refers to \p bo at delta \p pre_add, then writes the current
 * presumed address (bo->offset + pre_add) into the batch and advances
 * the write pointer by one dword.  The kernel patches the dword at exec
 * time if the buffer moves.
 *
 * \param pre_add       byte delta added to the target bo's address
 * \param read_domains  GEM read domain flags for the target bo
 * \param write_domain  GEM write domain flag (0 if read-only)
 * \return 0 on success, negative errno from drm_intel_bo_emit_reloc on failure
 */
int
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           drm_intel_bo *bo,
                           uint32_t read_domains,
                           uint32_t write_domain)
{
   unsigned offset;
   int ret = 0;

   assert(batch->base.relocs < batch->base.max_relocs);

   offset = (unsigned)(batch->base.ptr - batch->base.map);

   ret = drm_intel_bo_emit_reloc(batch->bo, offset,
                                 bo, pre_add,
                                 read_domains,
                                 write_domain);

   /* Write the presumed address into the batch.  The original code had
    * "bo->offset = pre_add", which clobbered the bo's cached offset and
    * stored only the delta; the relocated value must be base + delta.
    */
   ((uint32_t*)batch->base.ptr)[0] = bo->offset + pre_add;
   batch->base.ptr += 4;

   if (!ret)
      batch->base.relocs++;

   return ret;
}
/*
 * Close the batch with MI_FLUSH + MI_BATCH_BUFFER_END, upload the CPU
 * staging buffer to the GEM bo, submit it for execution, and wait for
 * it to complete.  Resets the batch for reuse afterwards.
 *
 * If \p fence is non-NULL, any previous fence it points to is released
 * and a fresh one is allocated (with a NULL bo, since rendering has
 * already been waited on here).
 */
void
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch,
struct intel_be_fence **fence)
{
struct i915_batchbuffer *i915 = &batch->base;
unsigned used = 0;
int ret = 0;
assert(i915_batchbuffer_space(i915) >= 0);
used = batch->base.ptr - batch->base.map;
/* The write pointer must be dword-aligned. */
assert((used & 3) == 0);
/* Pad so the batch ends on an 8-byte boundary: emit a noop dword
 * between MI_FLUSH and MI_BATCH_BUFFER_END when the current size is
 * dword- but not qword-aligned.
 */
if (used & 4) {
((uint32_t *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
((uint32_t *) batch->base.ptr)[1] = 0;
((uint32_t *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
batch->base.ptr += 12;
} else {
((uint32_t *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
((uint32_t *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
batch->base.ptr += 8;
}
debug_printf("%s\n", __FUNCTION__);
used = batch->base.ptr - batch->base.map;
/* Copy the finished command stream into the GEM buffer object. */
debug_printf(" - subdata\n");
drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
debug_printf(" - exec\n");
ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
/* Synchronous for now: block until the GPU has consumed the batch. */
debug_printf(" - waiting\n");
drm_intel_bo_wait_rendering(batch->bo);
debug_printf(" - done\n");
/* NOTE(review): exec failure is only caught by this assert; in NDEBUG
 * builds a failed submission is silently ignored — consider handling.
 */
assert(ret == 0);
intel_be_batchbuffer_reset(batch);
if (fence) {
if (*fence)
intel_be_fence_unreference(*fence);
/* NOTE(review): CALLOC_STRUCT result is not NULL-checked before the
 * refcount/bo writes below — confirm OOM policy with callers.
 */
(*fence) = CALLOC_STRUCT(intel_be_fence);
(*fence)->refcount = 1;
/* bo stays NULL: rendering was already waited on above, so there is
 * nothing left for the fence to track.
 */
(*fence)->bo = NULL;
}
}
/*
 * Intentionally a no-op: intel_be_batchbuffer_flush() already waits for
 * rendering to complete, so there is nothing left to finish here.
 */
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
{
}
/**
 * Destroy a batchbuffer: release the GEM buffer object reference, the
 * CPU staging buffer, and the wrapper struct itself.
 */
void
intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
{
   if (batch->bo != NULL)
      drm_intel_bo_unreference(batch->bo);

   free(batch->base.map);
   free(batch);
}
|