use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{parse2, ItemMod};

use crate::BindgenOptions;

mod merge_extern_blocks;
mod sort_semantically;

use merge_extern_blocks::merge_extern_blocks;
use sort_semantically::sort_semantically;

/// A single post-processing pass over the generated items, gated on the
/// `BindgenOptions` flag of the same name as the pass.
struct PostProcessingPass {
    should_run: fn(&BindgenOptions) -> bool,
    run: fn(&mut ItemMod),
}

// TODO: This can be a const fn when mutable references are allowed in const
// context.
macro_rules! pass {
    ($pass:ident) => {
        PostProcessingPass {
            should_run: |options| options.$pass,
            run: |item_mod| $pass(item_mod),
        }
    };
}

/// The post-processing passes, in the order in which they run.
const PASSES: &[PostProcessingPass] =
    &[pass!(merge_extern_blocks), pass!(sort_semantically)];

pub(crate) fn postprocessing(
    items: Vec<TokenStream>,
    options: &BindgenOptions,
) -> TokenStream {
    let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options));
    // Fast path: if no pass is enabled, skip the `syn` round-trip entirely.
    if !require_syn {
        return items.into_iter().collect();
    }

    let module_wrapped_tokens =
        quote!(mod wrapper_for_postprocessing_hack { #( #items )* });

    // This `syn` business is a hack, for now. We re-parse the already generated
    // code using `syn` (as opposed to `quote`) because `syn` gives us more
    // control over the individual items.
    //
    // One caveat is that some of the items coming from the `quote`d output might
    // contain multiple items themselves, hence we have to wrap the incoming
    // items in a `mod`.
    //
    // The `unwrap` here is deliberate because bindgen should generate valid Rust
    // items at all times.
    let mut item_mod = parse2::<ItemMod>(module_wrapped_tokens).unwrap();

    for pass in PASSES {
        if (pass.should_run)(options) {
            (pass.run)(&mut item_mod);
        }
    }

    // Unwrap the temporary module again and splice its items back into a single
    // `TokenStream`.
    let synful_items = item_mod
        .content
        .map(|(_, items)| items)
        .unwrap_or_default()
        .into_iter()
        .map(|item| item.into_token_stream());

    quote! { #( #synful_items )* }
}
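
#[cfg(test)]
mod tests {
    use super::*;
    use quote::quote;

    // A minimal usage sketch rather than exhaustive coverage: it assumes that
    // `BindgenOptions` implements `Default` and that the default configuration
    // enables none of the passes, so `postprocessing` takes the fast path and
    // simply concatenates the input token streams.
    #[test]
    fn passthrough_when_no_pass_is_enabled() {
        let items = vec![quote!(pub struct Foo;), quote!(pub struct Bar;)];
        let options = BindgenOptions::default();
        let tokens = postprocessing(items, &options);
        let rendered = tokens.to_string();
        assert!(rendered.contains("Foo"));
        assert!(rendered.contains("Bar"));
    }
}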