1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
|
use std::{fmt, sync::Arc};
use parking_lot::Mutex;
use winapi::shared::winerror;
use crate::{command::CommandBuffer, Backend, Shared};
use hal::{command, pool};
/// State shared between a `CommandPool` and the command buffers allocated
/// from it. Recycles D3D12 command allocators and command lists: `acquire`
/// hands out a (reused or freshly created) pair, and the `release_*`
/// methods return them for later reuse.
pub struct PoolShared {
    // Device used to create new allocators/lists when the recycle
    // vectors below are empty.
    device: native::Device,
    // Command-list type passed to both allocator and list creation.
    list_type: native::CmdListType,
    // Free allocators available for reuse.
    allocators: Mutex<Vec<native::CommandAllocator>>,
    // Free command lists available for reuse (closed; see `release_list`).
    lists: Mutex<Vec<native::GraphicsCommandList>>,
}
impl fmt::Debug for PoolShared {
    /// Prints only the type name for now; the fields are raw handles and
    /// locks with little diagnostic value.
    // TODO: print out as struct
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // `debug_struct` with no fields emits exactly "PoolShared",
        // matching the previous plain string output.
        fmt.debug_struct("PoolShared").finish()
    }
}
impl PoolShared {
    /// Hands out an allocator/list pair ready for command recording.
    ///
    /// Pops a free allocator and list from the recycle vectors when
    /// available, creating new ones from the device otherwise. A recycled
    /// list is reset against the chosen allocator (with no initial
    /// pipeline state) so it can start recording again.
    ///
    /// Panics if any of the underlying D3D12 calls fails.
    pub fn acquire(&self) -> (native::CommandAllocator, native::GraphicsCommandList) {
        let allocator = match self.allocators.lock().pop() {
            Some(allocator) => allocator,
            None => {
                let (allocator, hr) = self.device.create_command_allocator(self.list_type);
                assert_eq!(
                    winerror::S_OK,
                    hr,
                    "error on command allocator creation: {:x}",
                    hr
                );
                allocator
            }
        };
        let list = match self.lists.lock().pop() {
            Some(list) => {
                // Previously the HRESULT of `reset` was silently dropped;
                // check it like the creation paths do, so a failed reset
                // doesn't surface later as a mysteriously broken list.
                let hr = list.reset(allocator, native::PipelineState::null());
                assert_eq!(
                    winerror::S_OK,
                    hr,
                    "error on command list reset: {:x}",
                    hr
                );
                list
            }
            None => {
                let (command_list, hr) = self.device.create_graphics_command_list(
                    self.list_type,
                    allocator,
                    native::PipelineState::null(),
                    0,
                );
                assert_eq!(
                    winerror::S_OK,
                    hr,
                    "error on command list creation: {:x}",
                    hr
                );
                command_list
            }
        };
        (allocator, list)
    }

    /// Returns an allocator to the recycle pool for later reuse.
    pub fn release_allocator(&self, allocator: native::CommandAllocator) {
        self.allocators.lock().push(allocator);
    }

    /// Returns a command list to the recycle pool.
    ///
    /// Pre-condition: the list must be closed; `acquire` resets it
    /// before handing it out again.
    pub fn release_list(&self, list: native::GraphicsCommandList) {
        self.lists.lock().push(list);
    }
}
/// D3D12 implementation of the `hal` command pool.
#[derive(Debug)]
pub struct CommandPool {
    // Backend-wide shared state, handed to every allocated command buffer.
    shared: Arc<Shared>,
    // Allocator/list recycle state shared with this pool's command buffers.
    pool_shared: Arc<PoolShared>,
}
// SAFETY: the mutable allocator/list vectors inside `PoolShared` are
// guarded by `Mutex`es, and the remaining fields are only read.
// NOTE(review): this also assumes the raw `native` handles themselves are
// safe to move/share across threads — confirm against the d3d12 bindings.
unsafe impl Send for CommandPool {}
unsafe impl Sync for CommandPool {}
impl CommandPool {
    /// Creates a pool that produces command buffers of `list_type` on
    /// `device`. The creation flags are currently unused; the recycle
    /// vectors start out empty and fill up as buffers are freed.
    pub(crate) fn new(
        device: native::Device,
        list_type: native::CmdListType,
        shared: &Arc<Shared>,
        _create_flags: pool::CommandPoolCreateFlags,
    ) -> Self {
        CommandPool {
            shared: Arc::clone(shared),
            pool_shared: Arc::new(PoolShared {
                device,
                list_type,
                allocators: Mutex::new(Vec::new()),
                lists: Mutex::new(Vec::new()),
            }),
        }
    }
}
impl pool::CommandPool<Backend> for CommandPool {
    unsafe fn reset(&mut self, _release_resources: bool) {
        // Intentionally a no-op. Outstanding command buffers cannot
        // observe the reset, but each is expected to treat a subsequent
        // `begin` as if it were back in the `Initial` state.
    }

    unsafe fn allocate_one(&mut self, level: command::Level) -> CommandBuffer {
        // TODO: Implement secondary buffers
        assert_eq!(level, command::Level::Primary);
        CommandBuffer::new(&self.shared, &self.pool_shared)
    }

    unsafe fn free<I>(&mut self, cbufs: I)
    where
        I: IntoIterator<Item = CommandBuffer>,
    {
        // Lock both recycle pools once up front, then hand every freed
        // buffer's allocator/list (when it has them) back for reuse.
        let pool = &self.pool_shared;
        let mut free_allocators = pool.allocators.lock();
        let mut free_lists = pool.lists.lock();
        for cbuf in cbufs {
            let (allocator, list) = cbuf.destroy();
            free_allocators.extend(allocator);
            free_lists.extend(list);
        }
    }
}
|