summaryrefslogtreecommitdiffstats
path: root/src/test/ui/drop
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
commit698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree173a775858bd501c378080a10dca74132f05bc50 /src/test/ui/drop
parentInitial commit. (diff)
downloadrustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1.upstream/1.64.0+dfsg1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/ui/drop')
-rw-r--r--src/test/ui/drop/auxiliary/dropck_eyepatch_extern_crate.rs50
-rw-r--r--src/test/ui/drop/auxiliary/inline_dtor.rs8
-rw-r--r--src/test/ui/drop/auxiliary/issue-10028.rs9
-rw-r--r--src/test/ui/drop/drop-if-let-binding.rs7
-rw-r--r--src/test/ui/drop/drop-on-empty-block-exit.rs10
-rw-r--r--src/test/ui/drop/drop-on-ret.rs15
-rw-r--r--src/test/ui/drop/drop-struct-as-object.rs36
-rw-r--r--src/test/ui/drop/drop-trait-enum.rs94
-rw-r--r--src/test/ui/drop/drop-trait-generic.rs15
-rw-r--r--src/test/ui/drop/drop-trait.rs15
-rw-r--r--src/test/ui/drop/drop-uninhabited-enum.rs14
-rw-r--r--src/test/ui/drop/drop-with-type-ascription-1.rs8
-rw-r--r--src/test/ui/drop/drop-with-type-ascription-2.rs8
-rw-r--r--src/test/ui/drop/dropck-eyepatch-extern-crate.rs39
-rw-r--r--src/test/ui/drop/dropck-eyepatch-reorder.rs79
-rw-r--r--src/test/ui/drop/dropck-eyepatch.rs102
-rw-r--r--src/test/ui/drop/dropck_legal_cycles.rs1183
-rw-r--r--src/test/ui/drop/dynamic-drop-async.rs333
-rw-r--r--src/test/ui/drop/dynamic-drop.rs521
-rw-r--r--src/test/ui/drop/issue-10028.rs21
-rw-r--r--src/test/ui/drop/issue-30018-nopanic.rs103
-rw-r--r--src/test/ui/drop/issue-35546.rs20
-rw-r--r--src/test/ui/drop/issue-90752-raw-ptr-shenanigans.rs41
-rw-r--r--src/test/ui/drop/issue-90752.rs32
-rw-r--r--src/test/ui/drop/no-drop-flag-size.rs15
-rw-r--r--src/test/ui/drop/nondrop-cycle.rs31
-rw-r--r--src/test/ui/drop/repeat-drop-2.rs15
-rw-r--r--src/test/ui/drop/repeat-drop-2.stderr31
-rw-r--r--src/test/ui/drop/repeat-drop.rs121
-rw-r--r--src/test/ui/drop/terminate-in-initializer.rs34
-rw-r--r--src/test/ui/drop/use_inline_dtor.rs10
31 files changed, 3020 insertions, 0 deletions
diff --git a/src/test/ui/drop/auxiliary/dropck_eyepatch_extern_crate.rs b/src/test/ui/drop/auxiliary/dropck_eyepatch_extern_crate.rs
new file mode 100644
index 000000000..270d5de7a
--- /dev/null
+++ b/src/test/ui/drop/auxiliary/dropck_eyepatch_extern_crate.rs
@@ -0,0 +1,50 @@
+#![feature(dropck_eyepatch)]
+
+// The point of this test is to illustrate that the `#[may_dangle]`
+// attribute specifically allows, in the context of a type
+// implementing `Drop`, a generic parameter to be instantiated with a
+// lifetime that does not strictly outlive the owning type itself,
+// and that this attributes effects are preserved when importing
+// the type from another crate.
+//
+// See also dropck-eyepatch.rs for more information about the general
+// structure of the test.
+
+use std::cell::RefCell;
+
+pub trait Foo { fn foo(&self, _: &str); }
+
+pub struct Dt<A: Foo>(pub &'static str, pub A);
+pub struct Dr<'a, B:'a+Foo>(pub &'static str, pub &'a B);
+pub struct Pt<A,B: Foo>(pub &'static str, pub A, pub B);
+pub struct Pr<'a, 'b, B:'a+'b+Foo>(pub &'static str, pub &'a B, pub &'b B);
+pub struct St<A: Foo>(pub &'static str, pub A);
+pub struct Sr<'a, B:'a+Foo>(pub &'static str, pub &'a B);
+
+impl<A: Foo> Drop for Dt<A> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+impl<'a, B: Foo> Drop for Dr<'a, B> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+unsafe impl<#[may_dangle] A, B: Foo> Drop for Pt<A, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on A)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+unsafe impl<#[may_dangle] 'a, 'b, B: Foo> Drop for Pr<'a, 'b, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on 'a)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+
+impl Foo for RefCell<String> {
+ fn foo(&self, s: &str) {
+ let s2 = format!("{}|{}", *self.borrow(), s);
+ *self.borrow_mut() = s2;
+ }
+}
+
+impl<'a, T:Foo> Foo for &'a T {
+ fn foo(&self, s: &str) {
+ (*self).foo(s);
+ }
+}
diff --git a/src/test/ui/drop/auxiliary/inline_dtor.rs b/src/test/ui/drop/auxiliary/inline_dtor.rs
new file mode 100644
index 000000000..5eee89fdc
--- /dev/null
+++ b/src/test/ui/drop/auxiliary/inline_dtor.rs
@@ -0,0 +1,8 @@
+#![crate_name="inline_dtor"]
+
+pub struct Foo;
+
+impl Drop for Foo {
+ #[inline]
+ fn drop(&mut self) {}
+}
diff --git a/src/test/ui/drop/auxiliary/issue-10028.rs b/src/test/ui/drop/auxiliary/issue-10028.rs
new file mode 100644
index 000000000..135f26f40
--- /dev/null
+++ b/src/test/ui/drop/auxiliary/issue-10028.rs
@@ -0,0 +1,9 @@
+pub struct ZeroLengthThingWithDestructor;
+impl Drop for ZeroLengthThingWithDestructor {
+ fn drop(&mut self) {}
+}
+impl ZeroLengthThingWithDestructor {
+ pub fn new() -> ZeroLengthThingWithDestructor {
+ ZeroLengthThingWithDestructor
+ }
+}
diff --git a/src/test/ui/drop/drop-if-let-binding.rs b/src/test/ui/drop/drop-if-let-binding.rs
new file mode 100644
index 000000000..9c1ac4e0c
--- /dev/null
+++ b/src/test/ui/drop/drop-if-let-binding.rs
@@ -0,0 +1,7 @@
+// build-pass
+// regression test for issue #88307
+// compile-flags: -C opt-level=s
+
+fn main() {
+ if let Some(_val) = Option::<String>::None {}
+}
diff --git a/src/test/ui/drop/drop-on-empty-block-exit.rs b/src/test/ui/drop/drop-on-empty-block-exit.rs
new file mode 100644
index 000000000..ef3a90a53
--- /dev/null
+++ b/src/test/ui/drop/drop-on-empty-block-exit.rs
@@ -0,0 +1,10 @@
+// run-pass
+// pretty-expanded FIXME #23616
+#![allow(non_camel_case_types)]
+
+enum t { foo(Box<isize>), }
+
+pub fn main() {
+ let tt = t::foo(Box::new(10));
+ match tt { t::foo(_z) => { } }
+}
diff --git a/src/test/ui/drop/drop-on-ret.rs b/src/test/ui/drop/drop-on-ret.rs
new file mode 100644
index 000000000..290e274f3
--- /dev/null
+++ b/src/test/ui/drop/drop-on-ret.rs
@@ -0,0 +1,15 @@
+// run-pass
+
+
+
+// pretty-expanded FIXME #23616
+
+fn f() -> isize {
+ if true {
+ let _s: String = "should not leak".to_string();
+ return 1;
+ }
+ return 0;
+}
+
+pub fn main() { f(); }
diff --git a/src/test/ui/drop/drop-struct-as-object.rs b/src/test/ui/drop/drop-struct-as-object.rs
new file mode 100644
index 000000000..377027a4f
--- /dev/null
+++ b/src/test/ui/drop/drop-struct-as-object.rs
@@ -0,0 +1,36 @@
+// run-pass
+#![allow(unused_variables)]
+#![allow(non_upper_case_globals)]
+
+// Test that destructor on a struct runs successfully after the struct
+// is boxed and converted to an object.
+
+static mut value: usize = 0;
+
+struct Cat {
+ name : usize,
+}
+
+trait Dummy {
+ fn get(&self) -> usize;
+}
+
+impl Dummy for Cat {
+ fn get(&self) -> usize { self.name }
+}
+
+impl Drop for Cat {
+ fn drop(&mut self) {
+ unsafe { value = self.name; }
+ }
+}
+
+pub fn main() {
+ {
+ let x = Box::new(Cat {name: 22});
+ let nyan: Box<dyn Dummy> = x as Box<dyn Dummy>;
+ }
+ unsafe {
+ assert_eq!(value, 22);
+ }
+}
diff --git a/src/test/ui/drop/drop-trait-enum.rs b/src/test/ui/drop/drop-trait-enum.rs
new file mode 100644
index 000000000..d2b77650a
--- /dev/null
+++ b/src/test/ui/drop/drop-trait-enum.rs
@@ -0,0 +1,94 @@
+// run-pass
+#![allow(dead_code)]
+#![allow(unused_assignments)]
+#![allow(unused_variables)]
+// ignore-emscripten no threads support
+// needs-unwind
+
+use std::thread;
+use std::sync::mpsc::{channel, Sender};
+
+#[derive(PartialEq, Debug)]
+enum Message {
+ Dropped,
+ DestructorRan
+}
+
+struct SendOnDrop {
+ sender: Sender<Message>
+}
+
+impl Drop for SendOnDrop {
+ fn drop(&mut self) {
+ self.sender.send(Message::Dropped).unwrap();
+ }
+}
+
+enum Foo {
+ SimpleVariant(Sender<Message>),
+ NestedVariant(Box<usize>, SendOnDrop, Sender<Message>),
+ FailingVariant { on_drop: SendOnDrop }
+}
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ match self {
+ &mut Foo::SimpleVariant(ref mut sender) => {
+ sender.send(Message::DestructorRan).unwrap();
+ }
+ &mut Foo::NestedVariant(_, _, ref mut sender) => {
+ sender.send(Message::DestructorRan).unwrap();
+ }
+ &mut Foo::FailingVariant { .. } => {
+ panic!("Failed");
+ }
+ }
+ }
+}
+
+pub fn main() {
+ let (sender, receiver) = channel();
+ {
+ let v = Foo::SimpleVariant(sender);
+ }
+ assert_eq!(receiver.recv().unwrap(), Message::DestructorRan);
+ assert_eq!(receiver.recv().ok(), None);
+
+ let (sender, receiver) = channel();
+ {
+ let v = Foo::NestedVariant(Box::new(42), SendOnDrop { sender: sender.clone() }, sender);
+ }
+ assert_eq!(receiver.recv().unwrap(), Message::DestructorRan);
+ assert_eq!(receiver.recv().unwrap(), Message::Dropped);
+ assert_eq!(receiver.recv().ok(), None);
+
+ let (sender, receiver) = channel();
+ let t = thread::spawn(move|| {
+ let v = Foo::FailingVariant { on_drop: SendOnDrop { sender: sender } };
+ });
+ assert_eq!(receiver.recv().unwrap(), Message::Dropped);
+ assert_eq!(receiver.recv().ok(), None);
+ drop(t.join());
+
+ let (sender, receiver) = channel();
+ let t = {
+ thread::spawn(move|| {
+ let mut v = Foo::NestedVariant(Box::new(42), SendOnDrop {
+ sender: sender.clone()
+ }, sender.clone());
+ v = Foo::NestedVariant(Box::new(42),
+ SendOnDrop { sender: sender.clone() },
+ sender.clone());
+ v = Foo::SimpleVariant(sender.clone());
+ v = Foo::FailingVariant { on_drop: SendOnDrop { sender: sender } };
+ })
+ };
+ assert_eq!(receiver.recv().unwrap(), Message::DestructorRan);
+ assert_eq!(receiver.recv().unwrap(), Message::Dropped);
+ assert_eq!(receiver.recv().unwrap(), Message::DestructorRan);
+ assert_eq!(receiver.recv().unwrap(), Message::Dropped);
+ assert_eq!(receiver.recv().unwrap(), Message::DestructorRan);
+ assert_eq!(receiver.recv().unwrap(), Message::Dropped);
+ assert_eq!(receiver.recv().ok(), None);
+ drop(t.join());
+}
diff --git a/src/test/ui/drop/drop-trait-generic.rs b/src/test/ui/drop/drop-trait-generic.rs
new file mode 100644
index 000000000..cdefb680c
--- /dev/null
+++ b/src/test/ui/drop/drop-trait-generic.rs
@@ -0,0 +1,15 @@
+// run-pass
+#![allow(dead_code)]
+struct S<T> {
+ x: T
+}
+
+impl<T> ::std::ops::Drop for S<T> {
+ fn drop(&mut self) {
+ println!("bye");
+ }
+}
+
+pub fn main() {
+ let _x = S { x: 1 };
+}
diff --git a/src/test/ui/drop/drop-trait.rs b/src/test/ui/drop/drop-trait.rs
new file mode 100644
index 000000000..d93f77180
--- /dev/null
+++ b/src/test/ui/drop/drop-trait.rs
@@ -0,0 +1,15 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo {
+ x: isize
+}
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ println!("bye");
+ }
+}
+
+pub fn main() {
+ let _x: Foo = Foo { x: 3 };
+}
diff --git a/src/test/ui/drop/drop-uninhabited-enum.rs b/src/test/ui/drop/drop-uninhabited-enum.rs
new file mode 100644
index 000000000..b3566f685
--- /dev/null
+++ b/src/test/ui/drop/drop-uninhabited-enum.rs
@@ -0,0 +1,14 @@
+// run-pass
+#![allow(dead_code)]
+#![allow(unused_variables)]
+// pretty-expanded FIXME #23616
+
+enum Foo { }
+
+impl Drop for Foo {
+ fn drop(&mut self) { }
+}
+
+fn foo(x: Foo) { }
+
+fn main() { }
diff --git a/src/test/ui/drop/drop-with-type-ascription-1.rs b/src/test/ui/drop/drop-with-type-ascription-1.rs
new file mode 100644
index 000000000..e5a1a48df
--- /dev/null
+++ b/src/test/ui/drop/drop-with-type-ascription-1.rs
@@ -0,0 +1,8 @@
+// run-pass
+
+fn main() {
+ let foo = "hello".to_string();
+ let foo: Vec<&str> = foo.split_whitespace().collect();
+ let invalid_string = &foo[0];
+ assert_eq!(*invalid_string, "hello");
+}
diff --git a/src/test/ui/drop/drop-with-type-ascription-2.rs b/src/test/ui/drop/drop-with-type-ascription-2.rs
new file mode 100644
index 000000000..fb70ad48e
--- /dev/null
+++ b/src/test/ui/drop/drop-with-type-ascription-2.rs
@@ -0,0 +1,8 @@
+// run-pass
+
+fn main() {
+ let args = vec!["foobie", "asdf::asdf"];
+ let arr: Vec<&str> = args[1].split("::").collect();
+ assert_eq!(arr[0], "asdf");
+ assert_eq!(arr[0], "asdf");
+}
diff --git a/src/test/ui/drop/dropck-eyepatch-extern-crate.rs b/src/test/ui/drop/dropck-eyepatch-extern-crate.rs
new file mode 100644
index 000000000..fecfd5edf
--- /dev/null
+++ b/src/test/ui/drop/dropck-eyepatch-extern-crate.rs
@@ -0,0 +1,39 @@
+// run-pass
+// aux-build:dropck_eyepatch_extern_crate.rs
+
+extern crate dropck_eyepatch_extern_crate as other;
+
+use other::{Dt,Dr,Pt,Pr,St,Sr};
+
+fn main() {
+ use std::cell::RefCell;
+
+ struct CheckOnDrop(RefCell<String>, &'static str);
+ impl Drop for CheckOnDrop {
+ fn drop(&mut self) { assert_eq!(*self.0.borrow(), self.1); }
+ }
+
+ let c_long;
+ let (c, dt, dr, pt, pr, st, sr)
+ : (CheckOnDrop, Dt<_>, Dr<_>, Pt<_, _>, Pr<_>, St<_>, Sr<_>);
+ c_long = CheckOnDrop(RefCell::new("c_long".to_string()),
+ "c_long|pr|pt|dr|dt");
+ c = CheckOnDrop(RefCell::new("c".to_string()),
+ "c");
+
+ // No error: sufficiently long-lived state can be referenced in dtors
+ dt = Dt("dt", &c_long.0);
+ dr = Dr("dr", &c_long.0);
+
+ // No error: Drop impl asserts .1 (A and &'a _) are not accessed
+ pt = Pt("pt", &c.0, &c_long.0);
+ pr = Pr("pr", &c.0, &c_long.0);
+
+ // No error: St and Sr have no destructor.
+ st = St("st", &c.0);
+ sr = Sr("sr", &c.0);
+
+ println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ assert_eq!(*c_long.0.borrow(), "c_long");
+ assert_eq!(*c.0.borrow(), "c");
+}
diff --git a/src/test/ui/drop/dropck-eyepatch-reorder.rs b/src/test/ui/drop/dropck-eyepatch-reorder.rs
new file mode 100644
index 000000000..0d7af3d4f
--- /dev/null
+++ b/src/test/ui/drop/dropck-eyepatch-reorder.rs
@@ -0,0 +1,79 @@
+// run-pass
+#![feature(dropck_eyepatch)]
+
+// The point of this test is to test uses of `#[may_dangle]` attribute
+// where the formal declaration order (in the impl generics) does not
+// match the actual usage order (in the type instantiation).
+//
+// See also dropck-eyepatch.rs for more information about the general
+// structure of the test.
+
+trait Foo { fn foo(&self, _: &str); }
+
+struct Dt<A: Foo>(&'static str, A);
+struct Dr<'a, B:'a+Foo>(&'static str, &'a B);
+struct Pt<A: Foo, B: Foo>(&'static str, #[allow(unused_tuple_struct_fields)] A, B);
+struct Pr<'a, 'b, B:'a+'b+Foo>(&'static str, #[allow(unused_tuple_struct_fields)] &'a B, &'b B);
+struct St<A: Foo>(&'static str, #[allow(unused_tuple_struct_fields)] A);
+struct Sr<'a, B:'a+Foo>(&'static str, #[allow(unused_tuple_struct_fields)] &'a B);
+
+impl<A: Foo> Drop for Dt<A> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+impl<'a, B: Foo> Drop for Dr<'a, B> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+unsafe impl<B: Foo, #[may_dangle] A: Foo> Drop for Pt<A, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on A)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+unsafe impl<'b, #[may_dangle] 'a, B: Foo> Drop for Pr<'a, 'b, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on 'a)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+
+fn main() {
+ use std::cell::RefCell;
+
+ impl Foo for RefCell<String> {
+ fn foo(&self, s: &str) {
+ let s2 = format!("{}|{}", *self.borrow(), s);
+ *self.borrow_mut() = s2;
+ }
+ }
+
+ impl<'a, T:Foo> Foo for &'a T {
+ fn foo(&self, s: &str) {
+ (*self).foo(s);
+ }
+ }
+
+ struct CheckOnDrop(RefCell<String>, &'static str);
+ impl Drop for CheckOnDrop {
+ fn drop(&mut self) { assert_eq!(*self.0.borrow(), self.1); }
+ }
+
+ let c_long;
+ let (c, dt, dr, pt, pr, st, sr)
+ : (CheckOnDrop, Dt<_>, Dr<_>, Pt<_, _>, Pr<_>, St<_>, Sr<_>);
+ c_long = CheckOnDrop(RefCell::new("c_long".to_string()),
+ "c_long|pr|pt|dr|dt");
+ c = CheckOnDrop(RefCell::new("c".to_string()),
+ "c");
+
+ // No error: sufficiently long-lived state can be referenced in dtors
+ dt = Dt("dt", &c_long.0);
+ dr = Dr("dr", &c_long.0);
+
+ // No error: Drop impl asserts .1 (A and &'a _) are not accessed
+ pt = Pt("pt", &c.0, &c_long.0);
+ pr = Pr("pr", &c.0, &c_long.0);
+
+ // No error: St and Sr have no destructor.
+ st = St("st", &c.0);
+ sr = Sr("sr", &c.0);
+
+ println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ assert_eq!(*c_long.0.borrow(), "c_long");
+ assert_eq!(*c.0.borrow(), "c");
+}
diff --git a/src/test/ui/drop/dropck-eyepatch.rs b/src/test/ui/drop/dropck-eyepatch.rs
new file mode 100644
index 000000000..3c4840d5c
--- /dev/null
+++ b/src/test/ui/drop/dropck-eyepatch.rs
@@ -0,0 +1,102 @@
+// run-pass
+#![feature(dropck_eyepatch)]
+
+// The point of this test is to illustrate that the `#[may_dangle]`
+// attribute specifically allows, in the context of a type
+// implementing `Drop`, a generic parameter to be instantiated with a
+// lifetime that does not strictly outlive the owning type itself.
+//
+// Here we test that a model use of `#[may_dangle]` will compile and run.
+//
+// The illustration is made concrete by comparison with two variations
+// on the type with `#[may_dangle]`:
+//
+// 1. an analogous type that does not implement `Drop` (and thus
+// should exhibit maximal flexibility with respect to dropck), and
+//
+// 2. an analogous type that does not use `#[may_dangle]` (and thus
+// should exhibit the standard limitations imposed by dropck.
+//
+// The types in this file follow a pattern, {D,P,S}{t,r}, where:
+//
+// - D means "I implement Drop"
+//
+// - P means "I implement Drop but guarantee my (first) parameter is
+// pure, i.e., not accessed from the destructor"; no other parameters
+// are pure.
+//
+// - S means "I do not implement Drop"
+//
+// - t suffix is used when the first generic is a type
+//
+// - r suffix is used when the first generic is a lifetime.
+
+trait Foo { fn foo(&self, _: &str); }
+
+struct Dt<A: Foo>(&'static str, A);
+struct Dr<'a, B:'a+Foo>(&'static str, &'a B);
+struct Pt<A,B: Foo>(&'static str, #[allow(unused_tuple_struct_fields)] A, B);
+struct Pr<'a, 'b, B:'a+'b+Foo>(&'static str, #[allow(unused_tuple_struct_fields)] &'a B, &'b B);
+struct St<A: Foo>(&'static str, #[allow(unused_tuple_struct_fields)] A);
+struct Sr<'a, B:'a+Foo>(&'static str, #[allow(unused_tuple_struct_fields)] &'a B);
+
+impl<A: Foo> Drop for Dt<A> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+impl<'a, B: Foo> Drop for Dr<'a, B> {
+ fn drop(&mut self) { println!("drop {}", self.0); self.1.foo(self.0); }
+}
+unsafe impl<#[may_dangle] A, B: Foo> Drop for Pt<A, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on A)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+unsafe impl<#[may_dangle] 'a, 'b, B: Foo> Drop for Pr<'a, 'b, B> {
+ // (unsafe to access self.1 due to #[may_dangle] on 'a)
+ fn drop(&mut self) { println!("drop {}", self.0); self.2.foo(self.0); }
+}
+
+fn main() {
+ use std::cell::RefCell;
+
+ impl Foo for RefCell<String> {
+ fn foo(&self, s: &str) {
+ let s2 = format!("{}|{}", *self.borrow(), s);
+ *self.borrow_mut() = s2;
+ }
+ }
+
+ impl<'a, T:Foo> Foo for &'a T {
+ fn foo(&self, s: &str) {
+ (*self).foo(s);
+ }
+ }
+
+ struct CheckOnDrop(RefCell<String>, &'static str);
+ impl Drop for CheckOnDrop {
+ fn drop(&mut self) { assert_eq!(*self.0.borrow(), self.1); }
+ }
+
+ let c_long;
+ let (c, dt, dr, pt, pr, st, sr)
+ : (CheckOnDrop, Dt<_>, Dr<_>, Pt<_, _>, Pr<_>, St<_>, Sr<_>);
+ c_long = CheckOnDrop(RefCell::new("c_long".to_string()),
+ "c_long|pr|pt|dr|dt");
+ c = CheckOnDrop(RefCell::new("c".to_string()),
+ "c");
+
+ // No error: sufficiently long-lived state can be referenced in dtors
+ dt = Dt("dt", &c_long.0);
+ dr = Dr("dr", &c_long.0);
+
+ // No error: Drop impl asserts .1 (A and &'a _) are not accessed
+ pt = Pt("pt", &c.0, &c_long.0);
+ pr = Pr("pr", &c.0, &c_long.0);
+
+ // No error: St and Sr have no destructor.
+ st = St("st", &c.0);
+ sr = Sr("sr", &c.0);
+
+ println!("{:?}", (dt.0, dr.0, pt.0, pr.0, st.0, sr.0));
+ assert_eq!(*c_long.0.borrow(), "c_long");
+ assert_eq!(*c.0.borrow(), "c");
+}
diff --git a/src/test/ui/drop/dropck_legal_cycles.rs b/src/test/ui/drop/dropck_legal_cycles.rs
new file mode 100644
index 000000000..27a599315
--- /dev/null
+++ b/src/test/ui/drop/dropck_legal_cycles.rs
@@ -0,0 +1,1183 @@
+// run-pass
+// This test exercises cases where cyclic structure is legal,
+// including when the cycles go through data-structures such
+// as `Vec` or `TypedArena`.
+//
+// The intent is to cover as many such cases as possible, ensuring
+// that if the compiler did not complain circa Rust 1.x (1.2 as of
+// this writing), then it will continue to not complain in the future.
+//
+// Note that while some of the tests are only exercising using the
+// given collection as a "backing store" for a set of nodes that hold
+// the actual cycle (and thus the cycle does not go through the
+// collection itself in such cases), in general we *do* want to make
+// sure to have at least one example exercising a cycle that goes
+// through the collection, for every collection type that supports
+// this.
+
+// HIGH LEVEL DESCRIPTION OF THE TEST ARCHITECTURE
+// -----------------------------------------------
+//
+// We pick a data structure and want to make a cyclic construction
+// from it. Each test of interest is labelled starting with "Cycle N:
+// { ... }" where N is the test number and the "..."`is filled in with
+// a graphviz-style description of the graph structure that the
+// author believes is being made. So "{ a -> b, b -> (c,d), (c,d) -> e }"
+// describes a line connected to a diamond:
+//
+// c
+// / \
+// a - b e
+// \ /
+// d
+//
+// (Note that the above directed graph is actually acyclic.)
+//
+// The different graph structures are often composed of different data
+// types. Some may be built atop `Vec`, others atop `HashMap`, etc.
+//
+// For each graph structure, we actually *confirm* that a cycle exists
+// (as a safe-guard against a test author accidentally leaving it out)
+// by traversing each graph and "proving" that a cycle exists within it.
+//
+// To do this, while trying to keep the code uniform (despite working
+// with different underlying collection and smart-pointer types), we
+// have a standard traversal API:
+//
+// 1. every node in the graph carries a `mark` (a u32, init'ed to 0).
+//
+// 2. every node provides a method to visit its children
+//
+// 3. a traversal attmepts to visit the nodes of the graph and prove that
+// it sees the same node twice. It does this by setting the mark of each
+// node to a fresh non-zero value, and if it sees the current mark, it
+// "knows" that it must have found a cycle, and stops attempting further
+// traversal.
+//
+// 4. each traversal is controlled by a bit-string that tells it which child
+// it visit when it can take different paths. As a simple example,
+// in a binary tree, 0 could mean "left" (and 1, "right"), so that
+// "00010" means "left, left, left, right, left". (In general it will
+// read as many bits as it needs to choose one child.)
+//
+// The graphs in this test are all meant to be very small, and thus
+// short bitstrings of less than 64 bits should always suffice.
+//
+// (An earlier version of this test infrastructure simply had any
+// given traversal visit all children it encountered, in a
+// depth-first manner; one problem with this approach is that an
+// acyclic graph can still have sharing, which would then be treated
+// as a repeat mark and reported as a detected cycle.)
+//
+// The travseral code is a little more complicated because it has been
+// programmed in a somewhat defensive manner. For example it also has
+// a max threshold for the number of nodes it will visit, to guard
+// against scenarios where the nodes are not correctly setting their
+// mark when asked. There are various other methods not discussed here
+// that are for aiding debugging the test when it runs, such as the
+// `name` method that all nodes provide.
+//
+// So each test:
+//
+// 1. allocates the nodes in the graph,
+//
+// 2. sets up the links in the graph,
+//
+// 3. clones the "ContextData"
+//
+// 4. chooses a new current mark value for this test
+//
+// 5. initiates a traversal, potentially from multiple starting points
+// (aka "roots"), with a given control-string (potentially a
+// different string for each root). if it does start from a
+// distinct root, then such a test should also increment the
+// current mark value, so that this traversal is considered
+// distinct from the prior one on this graph structure.
+//
+// Note that most of the tests work with the default control string
+// of all-zeroes.
+//
+// 6. assert that the context confirms that it actually saw a cycle (since a traversal
+// might have terminated, e.g., on a tree structure that contained no cycles).
+
+use std::cell::{Cell, RefCell};
+use std::cmp::Ordering;
+use std::collections::BinaryHeap;
+use std::collections::HashMap;
+use std::collections::LinkedList;
+use std::collections::VecDeque;
+use std::collections::btree_map::BTreeMap;
+use std::collections::btree_set::BTreeSet;
+use std::hash::{Hash, Hasher};
+use std::rc::Rc;
+use std::sync::{Arc, RwLock, Mutex};
+
+const PRINT: bool = false;
+
+pub fn main() {
+ let c_orig = ContextData {
+ curr_depth: 0,
+ max_depth: 3,
+ visited: 0,
+ max_visits: 1000,
+ skipped: 0,
+ curr_mark: 0,
+ saw_prev_marked: false,
+ control_bits: 0,
+ };
+
+ // SANITY CHECK FOR TEST SUITE (thus unnumbered)
+ // Not a cycle: { v[0] -> (v[1], v[2]), v[1] -> v[3], v[2] -> v[3] };
+ let v: Vec<S2> = vec![Named::new("s0"),
+ Named::new("s1"),
+ Named::new("s2"),
+ Named::new("s3")];
+ v[0].next.set((Some(&v[1]), Some(&v[2])));
+ v[1].next.set((Some(&v[3]), None));
+ v[2].next.set((Some(&v[3]), None));
+ v[3].next.set((None, None));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 10;
+ assert!(!c.saw_prev_marked);
+ v[0].descend_into_self(&mut c);
+ assert!(!c.saw_prev_marked); // <-- different from below, b/c acyclic above
+
+ if PRINT { println!(); }
+
+ // Cycle 1: { v[0] -> v[1], v[1] -> v[0] };
+ // does not exercise `v` itself
+ let v: Vec<S> = vec![Named::new("s0"),
+ Named::new("s1")];
+ v[0].next.set(Some(&v[1]));
+ v[1].next.set(Some(&v[0]));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 10;
+ assert!(!c.saw_prev_marked);
+ v[0].descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 2: { v[0] -> v, v[1] -> v }
+ let v: V = Named::new("v");
+ v.contents[0].set(Some(&v));
+ v.contents[1].set(Some(&v));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 20;
+ assert!(!c.saw_prev_marked);
+ v.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 3: { hk0 -> hv0, hv0 -> hk0, hk1 -> hv1, hv1 -> hk1 };
+ // does not exercise `h` itself
+
+ let mut h: HashMap<H,H> = HashMap::new();
+ h.insert(Named::new("hk0"), Named::new("hv0"));
+ h.insert(Named::new("hk1"), Named::new("hv1"));
+ for (key, val) in h.iter() {
+ val.next.set(Some(key));
+ key.next.set(Some(val));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 30;
+ for (key, _) in h.iter() {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ key.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 4: { h -> (hmk0,hmv0,hmk1,hmv1), {hmk0,hmv0,hmk1,hmv1} -> h }
+
+ let mut h: HashMap<HM,HM> = HashMap::new();
+ h.insert(Named::new("hmk0"), Named::new("hmv0"));
+ h.insert(Named::new("hmk0"), Named::new("hmv0"));
+ for (key, val) in h.iter() {
+ val.contents.set(Some(&h));
+ key.contents.set(Some(&h));
+ }
+
+ let mut c = c_orig.clone();
+ c.max_depth = 2;
+ c.curr_mark = 40;
+ for (key, _) in h.iter() {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ key.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 5: { vd[0] -> vd[1], vd[1] -> vd[0] };
+ // does not exercise vd itself
+ let mut vd: VecDeque<S> = VecDeque::new();
+ vd.push_back(Named::new("d0"));
+ vd.push_back(Named::new("d1"));
+ vd[0].next.set(Some(&vd[1]));
+ vd[1].next.set(Some(&vd[0]));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 50;
+ assert!(!c.saw_prev_marked);
+ vd[0].descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 6: { vd -> (vd0, vd1), {vd0, vd1} -> vd }
+ let mut vd: VecDeque<VD> = VecDeque::new();
+ vd.push_back(Named::new("vd0"));
+ vd.push_back(Named::new("vd1"));
+ vd[0].contents.set(Some(&vd));
+ vd[1].contents.set(Some(&vd));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 60;
+ assert!(!c.saw_prev_marked);
+ vd[0].descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 7: { vm -> (vm0, vm1), {vm0, vm1} -> vm }
+ let mut vm: HashMap<usize, VM> = HashMap::new();
+ vm.insert(0, Named::new("vm0"));
+ vm.insert(1, Named::new("vm1"));
+ vm[&0].contents.set(Some(&vm));
+ vm[&1].contents.set(Some(&vm));
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 70;
+ assert!(!c.saw_prev_marked);
+ vm[&0].descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 8: { ll -> (ll0, ll1), {ll0, ll1} -> ll }
+ let mut ll: LinkedList<LL> = LinkedList::new();
+ ll.push_back(Named::new("ll0"));
+ ll.push_back(Named::new("ll1"));
+ for e in &ll {
+ e.contents.set(Some(&ll));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 80;
+ for e in &ll {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ e.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 9: { bh -> (bh0, bh1), {bh0, bh1} -> bh }
+ let mut bh: BinaryHeap<BH> = BinaryHeap::new();
+ bh.push(Named::new("bh0"));
+ bh.push(Named::new("bh1"));
+ for b in bh.iter() {
+ b.contents.set(Some(&bh));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 90;
+ for b in &bh {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ b.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 10: { btm -> (btk0, btv1), {bt0, bt1} -> btm }
+ let mut btm: BTreeMap<BTM, BTM> = BTreeMap::new();
+ btm.insert(Named::new("btk0"), Named::new("btv0"));
+ btm.insert(Named::new("btk1"), Named::new("btv1"));
+ for (k, v) in btm.iter() {
+ k.contents.set(Some(&btm));
+ v.contents.set(Some(&btm));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 100;
+ for (k, _) in &btm {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ k.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 10: { bts -> (bts0, bts1), {bts0, bts1} -> btm }
+ let mut bts: BTreeSet<BTS> = BTreeSet::new();
+ bts.insert(Named::new("bts0"));
+ bts.insert(Named::new("bts1"));
+ for v in bts.iter() {
+ v.contents.set(Some(&bts));
+ }
+
+ let mut c = c_orig.clone();
+ c.curr_mark = 100;
+ for b in &bts {
+ c.curr_mark += 1;
+ c.saw_prev_marked = false;
+ b.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+ // break;
+ }
+
+ if PRINT { println!(); }
+
+ // Cycle 11: { rc0 -> (rc1, rc2), rc1 -> (), rc2 -> rc0 }
+ let (rc0, rc1, rc2): (RCRC, RCRC, RCRC);
+ rc0 = RCRC::new("rcrc0");
+ rc1 = RCRC::new("rcrc1");
+ rc2 = RCRC::new("rcrc2");
+ rc0.0.borrow_mut().children.0 = Some(&rc1);
+ rc0.0.borrow_mut().children.1 = Some(&rc2);
+ rc2.0.borrow_mut().children.0 = Some(&rc0);
+
+ let mut c = c_orig.clone();
+ c.control_bits = 0b1;
+ c.curr_mark = 110;
+ assert!(!c.saw_prev_marked);
+ rc0.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // We want to take the previous Rc case and generalize it to Arc.
+ //
+ // We can use refcells if we're single-threaded (as this test is).
+ // If one were to generalize these constructions to a
+ // multi-threaded context, then it might seem like we could choose
+ // between either an RwLock or a Mutex to hold the owned arcs on
+ // each node.
+ //
+ // Part of the point of this test is to actually confirm that the
+ // cycle exists by traversing it. We can do that just fine with an
+ // RwLock (since we can grab the child pointers in read-only
+ // mode), but we cannot lock a std::sync::Mutex to guard reading
+ // from each node via the same pattern, since once you hit the
+ // cycle, you'll be trying to acquiring the same lock twice.
+ // (We deal with this by exiting the traversal early if try_lock fails.)
+
+ // Cycle 12: { arc0 -> (arc1, arc2), arc1 -> (), arc2 -> arc0 }, refcells
+ let (arc0, arc1, arc2): (ARCRC, ARCRC, ARCRC);
+ arc0 = ARCRC::new("arcrc0");
+ arc1 = ARCRC::new("arcrc1");
+ arc2 = ARCRC::new("arcrc2");
+ arc0.0.borrow_mut().children.0 = Some(&arc1);
+ arc0.0.borrow_mut().children.1 = Some(&arc2);
+ arc2.0.borrow_mut().children.0 = Some(&arc0);
+
+ let mut c = c_orig.clone();
+ c.control_bits = 0b1;
+ c.curr_mark = 110;
+ assert!(!c.saw_prev_marked);
+ arc0.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 13: { arc0 -> (arc1, arc2), arc1 -> (), arc2 -> arc0 }, rwlocks
+ let (arc0, arc1, arc2): (ARCRW, ARCRW, ARCRW);
+ arc0 = ARCRW::new("arcrw0");
+ arc1 = ARCRW::new("arcrw1");
+ arc2 = ARCRW::new("arcrw2");
+ arc0.0.write().unwrap().children.0 = Some(&arc1);
+ arc0.0.write().unwrap().children.1 = Some(&arc2);
+ arc2.0.write().unwrap().children.0 = Some(&arc0);
+
+ let mut c = c_orig.clone();
+ c.control_bits = 0b1;
+ c.curr_mark = 110;
+ assert!(!c.saw_prev_marked);
+ arc0.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+
+ if PRINT { println!(); }
+
+ // Cycle 14: { arc0 -> (arc1, arc2), arc1 -> (), arc2 -> arc0 }, mutexs
+ let (arc0, arc1, arc2): (ARCM, ARCM, ARCM);
+ arc0 = ARCM::new("arcm0");
+ arc1 = ARCM::new("arcm1");
+ arc2 = ARCM::new("arcm2");
+ arc0.1.lock().unwrap().children.0 = Some(&arc1);
+ arc0.1.lock().unwrap().children.1 = Some(&arc2);
+ arc2.1.lock().unwrap().children.0 = Some(&arc0);
+
+ let mut c = c_orig.clone();
+ c.control_bits = 0b1;
+ c.curr_mark = 110;
+ assert!(!c.saw_prev_marked);
+ arc0.descend_into_self(&mut c);
+ assert!(c.saw_prev_marked);
+}
+
+trait Named {
+ fn new(_: &'static str) -> Self;
+ fn name(&self) -> &str;
+}
+
+trait Marked<M> {
+ fn mark(&self) -> M;
+ fn set_mark(&self, mark: M);
+}
+
+struct S<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ next: Cell<Option<&'a S<'a>>>,
+}
+
+impl<'a> Named for S<'a> {
+ fn new(name: &'static str) -> S<'a> {
+ S { name: name, mark: Cell::new(0), next: Cell::new(None) }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for S<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct S2<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ next: Cell<(Option<&'a S2<'a>>, Option<&'a S2<'a>>)>,
+}
+
+impl<'a> Named for S2<'a> {
+ fn new(name: &'static str) -> S2<'a> {
+ S2 { name: name, mark: Cell::new(0), next: Cell::new((None, None)) }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for S2<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) {
+ self.mark.set(mark);
+ }
+}
+
+struct V<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Vec<Cell<Option<&'a V<'a>>>>,
+}
+
+impl<'a> Named for V<'a> {
+ fn new(name: &'static str) -> V<'a> {
+ V { name: name,
+ mark: Cell::new(0),
+ contents: vec![Cell::new(None), Cell::new(None)]
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for V<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+#[derive(Eq)]
+struct H<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ next: Cell<Option<&'a H<'a>>>,
+}
+
+impl<'a> Named for H<'a> {
+ fn new(name: &'static str) -> H<'a> {
+ H { name: name, mark: Cell::new(0), next: Cell::new(None) }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for H<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> PartialEq for H<'a> {
+ fn eq(&self, rhs: &H<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> Hash for H<'a> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.name.hash(state)
+ }
+}
+
+#[derive(Eq)]
+struct HM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a HashMap<HM<'a>, HM<'a>>>>,
+}
+
+impl<'a> Named for HM<'a> {
+ fn new(name: &'static str) -> HM<'a> {
+ HM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for HM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> PartialEq for HM<'a> {
+ fn eq(&self, rhs: &HM<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> Hash for HM<'a> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.name.hash(state)
+ }
+}
+
+
+struct VD<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a VecDeque<VD<'a>>>>,
+}
+
+impl<'a> Named for VD<'a> {
+ fn new(name: &'static str) -> VD<'a> {
+ VD { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for VD<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct VM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a HashMap<usize, VM<'a>>>>,
+}
+
+impl<'a> Named for VM<'a> {
+ fn new(name: &'static str) -> VM<'a> {
+ VM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for VM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct LL<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a LinkedList<LL<'a>>>>,
+}
+
+impl<'a> Named for LL<'a> {
+ fn new(name: &'static str) -> LL<'a> {
+ LL { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for LL<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+struct BH<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BinaryHeap<BH<'a>>>>,
+}
+
+impl<'a> Named for BH<'a> {
+ fn new(name: &'static str) -> BH<'a> {
+ BH { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BH<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BH<'a> { }
+
+impl<'a> PartialEq for BH<'a> {
+ fn eq(&self, rhs: &BH<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BH<'a> {
+ fn partial_cmp(&self, rhs: &BH<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BH<'a> {
+ fn cmp(&self, rhs: &BH<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+struct BTM<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BTreeMap<BTM<'a>, BTM<'a>>>>,
+}
+
+impl<'a> Named for BTM<'a> {
+ fn new(name: &'static str) -> BTM<'a> {
+ BTM { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BTM<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BTM<'a> { }
+
+impl<'a> PartialEq for BTM<'a> {
+ fn eq(&self, rhs: &BTM<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BTM<'a> {
+ fn partial_cmp(&self, rhs: &BTM<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BTM<'a> {
+ fn cmp(&self, rhs: &BTM<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+struct BTS<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ contents: Cell<Option<&'a BTreeSet<BTS<'a>>>>,
+}
+
+impl<'a> Named for BTS<'a> {
+ fn new(name: &'static str) -> BTS<'a> {
+ BTS { name: name,
+ mark: Cell::new(0),
+ contents: Cell::new(None)
+ }
+ }
+ fn name(&self) -> &str { self.name }
+}
+
+impl<'a> Marked<u32> for BTS<'a> {
+ fn mark(&self) -> u32 { self.mark.get() }
+ fn set_mark(&self, mark: u32) { self.mark.set(mark); }
+}
+
+impl<'a> Eq for BTS<'a> { }
+
+impl<'a> PartialEq for BTS<'a> {
+ fn eq(&self, rhs: &BTS<'a>) -> bool {
+ self.name == rhs.name
+ }
+}
+
+impl<'a> PartialOrd for BTS<'a> {
+ fn partial_cmp(&self, rhs: &BTS<'a>) -> Option<Ordering> {
+ Some(self.cmp(rhs))
+ }
+}
+
+impl<'a> Ord for BTS<'a> {
+ fn cmp(&self, rhs: &BTS<'a>) -> Ordering {
+ self.name.cmp(rhs.name)
+ }
+}
+
+#[derive(Clone)]
+struct RCRCData<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ children: (Option<&'a RCRC<'a>>, Option<&'a RCRC<'a>>),
+}
+#[derive(Clone)]
+struct RCRC<'a>(Rc<RefCell<RCRCData<'a>>>);
+
+impl<'a> Named for RCRC<'a> {
+ fn new(name: &'static str) -> Self {
+ RCRC(Rc::new(RefCell::new(RCRCData {
+ name: name, mark: Cell::new(0), children: (None, None), })))
+ }
+ fn name(&self) -> &str { self.0.borrow().name }
+}
+
+impl<'a> Marked<u32> for RCRC<'a> {
+ fn mark(&self) -> u32 { self.0.borrow().mark.get() }
+ fn set_mark(&self, mark: u32) { self.0.borrow().mark.set(mark); }
+}
+
+impl<'a> Children<'a> for RCRC<'a> {
+ fn count_children(&self) -> usize { 2 }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let children = &self.0.borrow().children;
+ let child = match index {
+ 0 => if let Some(child) = children.0 { child } else { return; },
+ 1 => if let Some(child) = children.1 { child } else { return; },
+ _ => panic!("bad children"),
+ };
+ // println!("S2 {} descending into child {} at index {}", self.name, child.name, index);
+ child.descend_into_self(context);
+ }
+}
+#[derive(Clone)]
+struct ARCRCData<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ children: (Option<&'a ARCRC<'a>>, Option<&'a ARCRC<'a>>),
+}
+#[derive(Clone)]
+struct ARCRC<'a>(Arc<RefCell<ARCRCData<'a>>>);
+
+impl<'a> Named for ARCRC<'a> {
+ fn new(name: &'static str) -> Self {
+ ARCRC(Arc::new(RefCell::new(ARCRCData {
+ name: name, mark: Cell::new(0), children: (None, None), })))
+ }
+ fn name(&self) -> &str { self.0.borrow().name }
+}
+
+impl<'a> Marked<u32> for ARCRC<'a> {
+ fn mark(&self) -> u32 { self.0.borrow().mark.get() }
+ fn set_mark(&self, mark: u32) { self.0.borrow().mark.set(mark); }
+}
+
+impl<'a> Children<'a> for ARCRC<'a> {
+ fn count_children(&self) -> usize { 2 }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let children = &self.0.borrow().children;
+ match index {
+ 0 => if let Some(ref child) = children.0 {
+ child.descend_into_self(context);
+ },
+ 1 => if let Some(ref child) = children.1 {
+ child.descend_into_self(context);
+ },
+ _ => panic!("bad children!"),
+ }
+ }
+}
+
+#[derive(Clone)]
+struct ARCMData<'a> {
+ mark: Cell<u32>,
+ children: (Option<&'a ARCM<'a>>, Option<&'a ARCM<'a>>),
+}
+
+#[derive(Clone)]
+struct ARCM<'a>(&'static str, Arc<Mutex<ARCMData<'a>>>);
+
+impl<'a> Named for ARCM<'a> {
+ fn new(name: &'static str) -> Self {
+ ARCM(name, Arc::new(Mutex::new(ARCMData {
+ mark: Cell::new(0), children: (None, None), })))
+ }
+ fn name(&self) -> &str { self.0 }
+}
+
+impl<'a> Marked<u32> for ARCM<'a> {
+ fn mark(&self) -> u32 { self.1.lock().unwrap().mark.get() }
+ fn set_mark(&self, mark: u32) { self.1.lock().unwrap().mark.set(mark); }
+}
+
+impl<'a> Children<'a> for ARCM<'a> {
+ fn count_children(&self) -> usize { 2 }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let ref children = if let Ok(data) = self.1.try_lock() {
+ data.children
+ } else { return; };
+ match index {
+ 0 => if let Some(ref child) = children.0 {
+ child.descend_into_self(context);
+ },
+ 1 => if let Some(ref child) = children.1 {
+ child.descend_into_self(context);
+ },
+ _ => panic!("bad children!"),
+ }
+ }
+}
+
+#[derive(Clone)]
+struct ARCRWData<'a> {
+ name: &'static str,
+ mark: Cell<u32>,
+ children: (Option<&'a ARCRW<'a>>, Option<&'a ARCRW<'a>>),
+}
+
+#[derive(Clone)]
+struct ARCRW<'a>(Arc<RwLock<ARCRWData<'a>>>);
+
+impl<'a> Named for ARCRW<'a> {
+ fn new(name: &'static str) -> Self {
+ ARCRW(Arc::new(RwLock::new(ARCRWData {
+ name: name, mark: Cell::new(0), children: (None, None), })))
+ }
+ fn name(&self) -> &str { self.0.read().unwrap().name }
+}
+
+impl<'a> Marked<u32> for ARCRW<'a> {
+ fn mark(&self) -> u32 { self.0.read().unwrap().mark.get() }
+ fn set_mark(&self, mark: u32) { self.0.read().unwrap().mark.set(mark); }
+}
+
+impl<'a> Children<'a> for ARCRW<'a> {
+ fn count_children(&self) -> usize { 2 }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let children = &self.0.read().unwrap().children;
+ match index {
+ 0 => if let Some(ref child) = children.0 {
+ child.descend_into_self(context);
+ },
+ 1 => if let Some(ref child) = children.1 {
+ child.descend_into_self(context);
+ },
+ _ => panic!("bad children!"),
+ }
+ }
+}
+
+trait Context {
+ fn next_index(&mut self, len: usize) -> usize;
+ fn should_act(&self) -> bool;
+ fn increase_visited(&mut self);
+ fn increase_skipped(&mut self);
+ fn increase_depth(&mut self);
+ fn decrease_depth(&mut self);
+}
+
+trait PrePost<T> {
+ fn pre(&mut self, _: &T);
+ fn post(&mut self, _: &T);
+ fn hit_limit(&mut self, _: &T);
+}
+
+trait Children<'a> {
+ fn count_children(&self) -> usize;
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized;
+
+ fn next_child<C>(&self, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let index = context.next_index(self.count_children());
+ self.descend_one_child(context, index);
+ }
+
+ fn descend_into_self<C>(&self, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ context.pre(self);
+ if context.should_act() {
+ context.increase_visited();
+ context.increase_depth();
+ self.next_child(context);
+ context.decrease_depth();
+ } else {
+ context.hit_limit(self);
+ context.increase_skipped();
+ }
+ context.post(self);
+ }
+
+ fn descend<'b, C>(&self, c: &Cell<Option<&'b Self>>, context: &mut C)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ if let Some(r) = c.get() {
+ r.descend_into_self(context);
+ }
+ }
+}
+
+impl<'a> Children<'a> for S<'a> {
+ fn count_children(&self) -> usize { 1 }
+ fn descend_one_child<C>(&self, context: &mut C, _: usize)
+ where C: Context + PrePost<Self>, Self: Sized {
+ self.descend(&self.next, context);
+ }
+}
+
+impl<'a> Children<'a> for S2<'a> {
+ fn count_children(&self) -> usize { 2 }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ let children = self.next.get();
+ let child = match index {
+ 0 => if let Some(child) = children.0 { child } else { return; },
+ 1 => if let Some(child) = children.1 { child } else { return; },
+ _ => panic!("bad children"),
+ };
+ // println!("S2 {} descending into child {} at index {}", self.name, child.name, index);
+ child.descend_into_self(context);
+ }
+}
+
+impl<'a> Children<'a> for V<'a> {
+ fn count_children(&self) -> usize { self.contents.len() }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ if let Some(child) = self.contents[index].get() {
+ child.descend_into_self(context);
+ }
+ }
+}
+
+impl<'a> Children<'a> for H<'a> {
+ fn count_children(&self) -> usize { 1 }
+ fn descend_one_child<C>(&self, context: &mut C, _: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ self.descend(&self.next, context);
+ }
+}
+
+impl<'a> Children<'a> for HM<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(m) = self.contents.get() { 2 * m.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ if let Some(ref hm) = self.contents.get() {
+ for (k, v) in hm.iter().nth(index / 2) {
+ [k, v][index % 2].descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for VD<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(d) = self.contents.get() { d.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<Self>, Self: Sized
+ {
+ if let Some(ref vd) = self.contents.get() {
+ for r in vd.iter().nth(index) {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for VM<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(m) = self.contents.get() { m.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<VM<'a>>
+ {
+ if let Some(ref vd) = self.contents.get() {
+ for (_idx, r) in vd.iter().nth(index) {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for LL<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(l) = self.contents.get() { l.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<LL<'a>>
+ {
+ if let Some(ref ll) = self.contents.get() {
+ for r in ll.iter().nth(index) {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BH<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(h) = self.contents.get() { h.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<BH<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for r in bh.iter().nth(index) {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BTM<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(m) = self.contents.get() { 2 * m.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<BTM<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for (k, v) in bh.iter().nth(index / 2) {
+ [k, v][index % 2].descend_into_self(context);
+ }
+ }
+ }
+}
+
+impl<'a> Children<'a> for BTS<'a> {
+ fn count_children(&self) -> usize {
+ if let Some(s) = self.contents.get() { s.iter().count() } else { 0 }
+ }
+ fn descend_one_child<C>(&self, context: &mut C, index: usize)
+ where C: Context + PrePost<BTS<'a>>
+ {
+ if let Some(ref bh) = self.contents.get() {
+ for r in bh.iter().nth(index) {
+ r.descend_into_self(context);
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+struct ContextData {
+ curr_depth: usize,
+ max_depth: usize,
+ visited: usize,
+ max_visits: usize,
+ skipped: usize,
+ curr_mark: u32,
+ saw_prev_marked: bool,
+ control_bits: u64,
+}
+
+impl Context for ContextData {
+ fn next_index(&mut self, len: usize) -> usize {
+ if len < 2 { return 0; }
+ let mut pow2 = len.next_power_of_two();
+ let _pow2_orig = pow2;
+ let mut idx = 0;
+ let mut bits = self.control_bits;
+ while pow2 > 1 {
+ idx = (idx << 1) | (bits & 1) as usize;
+ bits = bits >> 1;
+ pow2 = pow2 >> 1;
+ }
+ idx = idx % len;
+ // println!("next_index({} [{:b}]) says {}, pre(bits): {:b} post(bits): {:b}",
+ // len, _pow2_orig, idx, self.control_bits, bits);
+ self.control_bits = bits;
+ return idx;
+ }
+ fn should_act(&self) -> bool {
+ self.curr_depth < self.max_depth && self.visited < self.max_visits
+ }
+ fn increase_visited(&mut self) { self.visited += 1; }
+ fn increase_skipped(&mut self) { self.skipped += 1; }
+ fn increase_depth(&mut self) { self.curr_depth += 1; }
+ fn decrease_depth(&mut self) { self.curr_depth -= 1; }
+}
+
+impl<T:Named+Marked<u32>> PrePost<T> for ContextData {
+ fn pre(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("prev {}", t.name()); }
+ if t.mark() == self.curr_mark {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("(probably previously marked)"); }
+ self.saw_prev_marked = true;
+ }
+ t.set_mark(self.curr_mark);
+ }
+ fn post(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("post {}", t.name()); }
+ }
+ fn hit_limit(&mut self, t: &T) {
+ for _ in 0..self.curr_depth {
+ if PRINT { print!(" "); }
+ }
+ if PRINT { println!("LIMIT {}", t.name()); }
+ }
+}
diff --git a/src/test/ui/drop/dynamic-drop-async.rs b/src/test/ui/drop/dynamic-drop-async.rs
new file mode 100644
index 000000000..13bd71ecb
--- /dev/null
+++ b/src/test/ui/drop/dynamic-drop-async.rs
@@ -0,0 +1,333 @@
+// Test that values are not leaked in async functions, even in the cases where:
+// * Dropping one of the values panics while running the future.
+// * The future is dropped at one of its suspend points.
+// * Dropping one of the values panics while dropping the future.
+
+// run-pass
+// needs-unwind
+// edition:2018
+// ignore-wasm32-bare compiled with panic=abort by default
+
+#![allow(unused)]
+
+use std::{
+ cell::{Cell, RefCell},
+ future::Future,
+ marker::Unpin,
+ panic,
+ pin::Pin,
+ ptr,
+ rc::Rc,
+ task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
+};
+
+struct InjectedFailure;
+
+struct Defer<T> {
+ ready: bool,
+ value: Option<T>,
+}
+
+impl<T: Unpin> Future for Defer<T> {
+ type Output = T;
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+ if self.ready {
+ Poll::Ready(self.value.take().unwrap())
+ } else {
+ self.ready = true;
+ Poll::Pending
+ }
+ }
+}
+
+/// Allocator tracks the creation and destruction of `Ptr`s.
+/// The `failing_op`-th operation will panic.
+struct Allocator {
+ data: RefCell<Vec<bool>>,
+ failing_op: usize,
+ cur_ops: Cell<usize>,
+}
+
+impl panic::UnwindSafe for Allocator {}
+impl panic::RefUnwindSafe for Allocator {}
+
+impl Drop for Allocator {
+ fn drop(&mut self) {
+ let data = self.data.borrow();
+ if data.iter().any(|d| *d) {
+ panic!("missing free: {:?}", data);
+ }
+ }
+}
+
+impl Allocator {
+ fn new(failing_op: usize) -> Self {
+ Allocator { failing_op, cur_ops: Cell::new(0), data: RefCell::new(vec![]) }
+ }
+ fn alloc(&self) -> impl Future<Output = Ptr<'_>> + '_ {
+ self.fallible_operation();
+
+ let mut data = self.data.borrow_mut();
+
+ let addr = data.len();
+ data.push(true);
+ Defer { ready: false, value: Some(Ptr(addr, self)) }
+ }
+ fn fallible_operation(&self) {
+ self.cur_ops.set(self.cur_ops.get() + 1);
+
+ if self.cur_ops.get() == self.failing_op {
+ panic::panic_any(InjectedFailure);
+ }
+ }
+}
+
+// Type that tracks whether it was dropped and can panic when it's created or
+// destroyed.
+struct Ptr<'a>(usize, &'a Allocator);
+impl<'a> Drop for Ptr<'a> {
+ fn drop(&mut self) {
+ match self.1.data.borrow_mut()[self.0] {
+ false => panic!("double free at index {:?}", self.0),
+ ref mut d => *d = false,
+ }
+
+ self.1.fallible_operation();
+ }
+}
+
+async fn dynamic_init(a: Rc<Allocator>, c: bool) {
+ let _x;
+ if c {
+ _x = Some(a.alloc().await);
+ }
+}
+
+async fn dynamic_drop(a: Rc<Allocator>, c: bool) {
+ let x = a.alloc().await;
+ if c {
+ Some(x)
+ } else {
+ None
+ };
+}
+
+struct TwoPtrs<'a>(Ptr<'a>, Ptr<'a>);
+async fn struct_dynamic_drop(a: Rc<Allocator>, c0: bool, c1: bool, c: bool) {
+ for i in 0..2 {
+ let x;
+ let y;
+ if (c0 && i == 0) || (c1 && i == 1) {
+ x = (a.alloc().await, a.alloc().await, a.alloc().await);
+ y = TwoPtrs(a.alloc().await, a.alloc().await);
+ if c {
+ drop(x.1);
+ a.alloc().await;
+ drop(y.0);
+ a.alloc().await;
+ }
+ }
+ }
+}
+
+async fn field_assignment(a: Rc<Allocator>, c0: bool) {
+ let mut x = (TwoPtrs(a.alloc().await, a.alloc().await), a.alloc().await);
+
+ x.1 = a.alloc().await;
+ x.1 = a.alloc().await;
+
+ let f = (x.0).0;
+ a.alloc().await;
+ if c0 {
+ (x.0).0 = f;
+ }
+ a.alloc().await;
+}
+
+async fn assignment(a: Rc<Allocator>, c0: bool, c1: bool) {
+ let mut _v = a.alloc().await;
+ let mut _w = a.alloc().await;
+ if c0 {
+ drop(_v);
+ }
+ _v = _w;
+ if c1 {
+ _w = a.alloc().await;
+ }
+}
+
+async fn array_simple(a: Rc<Allocator>) {
+ let _x = [a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await];
+}
+
+async fn vec_simple(a: Rc<Allocator>) {
+ let _x = vec![a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await];
+}
+
+async fn mixed_drop_and_nondrop(a: Rc<Allocator>) {
+ // check that destructor panics handle drop
+ // and non-drop blocks in the same scope correctly.
+ //
+ // Surprisingly enough, this used to not work.
+ let (x, y, z);
+ x = a.alloc().await;
+ y = 5;
+ z = a.alloc().await;
+}
+
+#[allow(unreachable_code)]
+async fn vec_unreachable(a: Rc<Allocator>) {
+ let _x = vec![a.alloc().await, a.alloc().await, a.alloc().await, return];
+}
+
+async fn slice_pattern_one_of(a: Rc<Allocator>, i: usize) {
+ let array = [a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await];
+ let _x = match i {
+ 0 => {
+ let [a, ..] = array;
+ a
+ }
+ 1 => {
+ let [_, a, ..] = array;
+ a
+ }
+ 2 => {
+ let [_, _, a, _] = array;
+ a
+ }
+ 3 => {
+ let [_, _, _, a] = array;
+ a
+ }
+ _ => panic!("unmatched"),
+ };
+ a.alloc().await;
+}
+
+async fn subslice_pattern_from_end_with_drop(a: Rc<Allocator>, arg: bool, arg2: bool) {
+ let arr = [a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await];
+ if arg2 {
+ drop(arr);
+ return;
+ }
+
+ if arg {
+ let [.., _x, _] = arr;
+ } else {
+ let [_, _y @ ..] = arr;
+ }
+ a.alloc().await;
+}
+
+async fn subslice_pattern_reassign(a: Rc<Allocator>) {
+ let mut ar = [a.alloc().await, a.alloc().await, a.alloc().await];
+ let [_, _, _x] = ar;
+ ar = [a.alloc().await, a.alloc().await, a.alloc().await];
+ let [_, _y @ ..] = ar;
+ a.alloc().await;
+}
+
+async fn move_ref_pattern(a: Rc<Allocator>) {
+ let mut tup = (a.alloc().await, a.alloc().await, a.alloc().await, a.alloc().await);
+ let (ref _a, ref mut _b, _c, mut _d) = tup;
+ a.alloc().await;
+}
+
+fn run_test<F, G>(cx: &mut Context<'_>, ref f: F)
+where
+ F: Fn(Rc<Allocator>) -> G,
+ G: Future<Output = ()>,
+{
+ for polls in 0.. {
+ // Run without any panics to find which operations happen after the
+ // penultimate `poll`.
+ let first_alloc = Rc::new(Allocator::new(usize::MAX));
+ let mut fut = Box::pin(f(first_alloc.clone()));
+ let mut ops_before_last_poll = 0;
+ let mut completed = false;
+ for _ in 0..polls {
+ ops_before_last_poll = first_alloc.cur_ops.get();
+ if let Poll::Ready(()) = fut.as_mut().poll(cx) {
+ completed = true;
+ }
+ }
+ drop(fut);
+
+ // Start at `ops_before_last_poll` so that we will always be able to
+ // `poll` the expected number of times.
+ for failing_op in ops_before_last_poll..first_alloc.cur_ops.get() {
+ let alloc = Rc::new(Allocator::new(failing_op + 1));
+ let f = &f;
+ let cx = &mut *cx;
+ let result = panic::catch_unwind(panic::AssertUnwindSafe(move || {
+ let mut fut = Box::pin(f(alloc));
+ for _ in 0..polls {
+ let _ = fut.as_mut().poll(cx);
+ }
+ drop(fut);
+ }));
+ match result {
+ Ok(..) => panic!("test executed more ops on first call"),
+ Err(e) => {
+ if e.downcast_ref::<InjectedFailure>().is_none() {
+ panic::resume_unwind(e);
+ }
+ }
+ }
+ }
+
+ if completed {
+ break;
+ }
+ }
+}
+
+fn clone_waker(data: *const ()) -> RawWaker {
+ RawWaker::new(data, &RawWakerVTable::new(clone_waker, drop, drop, drop))
+}
+
+fn main() {
+ let waker = unsafe { Waker::from_raw(clone_waker(ptr::null())) };
+ let context = &mut Context::from_waker(&waker);
+
+ run_test(context, |a| dynamic_init(a, false));
+ run_test(context, |a| dynamic_init(a, true));
+ run_test(context, |a| dynamic_drop(a, false));
+ run_test(context, |a| dynamic_drop(a, true));
+
+ run_test(context, |a| assignment(a, false, false));
+ run_test(context, |a| assignment(a, false, true));
+ run_test(context, |a| assignment(a, true, false));
+ run_test(context, |a| assignment(a, true, true));
+
+ run_test(context, |a| array_simple(a));
+ run_test(context, |a| vec_simple(a));
+ run_test(context, |a| vec_unreachable(a));
+
+ run_test(context, |a| struct_dynamic_drop(a, false, false, false));
+ run_test(context, |a| struct_dynamic_drop(a, false, false, true));
+ run_test(context, |a| struct_dynamic_drop(a, false, true, false));
+ run_test(context, |a| struct_dynamic_drop(a, false, true, true));
+ run_test(context, |a| struct_dynamic_drop(a, true, false, false));
+ run_test(context, |a| struct_dynamic_drop(a, true, false, true));
+ run_test(context, |a| struct_dynamic_drop(a, true, true, false));
+ run_test(context, |a| struct_dynamic_drop(a, true, true, true));
+
+ run_test(context, |a| field_assignment(a, false));
+ run_test(context, |a| field_assignment(a, true));
+
+ run_test(context, |a| mixed_drop_and_nondrop(a));
+
+ run_test(context, |a| slice_pattern_one_of(a, 0));
+ run_test(context, |a| slice_pattern_one_of(a, 1));
+ run_test(context, |a| slice_pattern_one_of(a, 2));
+ run_test(context, |a| slice_pattern_one_of(a, 3));
+
+ run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, true));
+ run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, false));
+ run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, true));
+ run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, false));
+ run_test(context, |a| subslice_pattern_reassign(a));
+
+ run_test(context, |a| move_ref_pattern(a));
+}
diff --git a/src/test/ui/drop/dynamic-drop.rs b/src/test/ui/drop/dynamic-drop.rs
new file mode 100644
index 000000000..e70686774
--- /dev/null
+++ b/src/test/ui/drop/dynamic-drop.rs
@@ -0,0 +1,521 @@
+// run-pass
+// needs-unwind
+// ignore-wasm32-bare compiled with panic=abort by default
+
+#![feature(generators, generator_trait)]
+
+#![allow(unused_assignments)]
+#![allow(unused_variables)]
+
+use std::cell::{Cell, RefCell};
+use std::mem::ManuallyDrop;
+use std::ops::Generator;
+use std::panic;
+use std::pin::Pin;
+
+struct InjectedFailure;
+
+struct Allocator {
+ data: RefCell<Vec<bool>>,
+ failing_op: usize,
+ cur_ops: Cell<usize>,
+}
+
+impl panic::UnwindSafe for Allocator {}
+impl panic::RefUnwindSafe for Allocator {}
+
+impl Drop for Allocator {
+ fn drop(&mut self) {
+ let data = self.data.borrow();
+ if data.iter().any(|d| *d) {
+ panic!("missing free: {:?}", data);
+ }
+ }
+}
+
+impl Allocator {
+ fn new(failing_op: usize) -> Self {
+ Allocator {
+ failing_op: failing_op,
+ cur_ops: Cell::new(0),
+ data: RefCell::new(vec![])
+ }
+ }
+ fn alloc(&self) -> Ptr<'_> {
+ self.cur_ops.set(self.cur_ops.get() + 1);
+
+ if self.cur_ops.get() == self.failing_op {
+ panic::panic_any(InjectedFailure);
+ }
+
+ let mut data = self.data.borrow_mut();
+ let addr = data.len();
+ data.push(true);
+ Ptr(addr, self)
+ }
+ // FIXME(#47949) Any use of this indicates a bug in rustc: we should never
+ // be leaking values in the cases here.
+ //
+ // Creates a `Ptr<'_>` and checks that the allocated value is leaked if the
+    // `failing_op` is in the list of exceptions.
+ fn alloc_leaked(&self, exceptions: Vec<usize>) -> Ptr<'_> {
+ let ptr = self.alloc();
+
+ if exceptions.iter().any(|operation| *operation == self.failing_op) {
+ let mut data = self.data.borrow_mut();
+ data[ptr.0] = false;
+ }
+ ptr
+ }
+}
+
+struct Ptr<'a>(usize, &'a Allocator);
+impl<'a> Drop for Ptr<'a> {
+ fn drop(&mut self) {
+ match self.1.data.borrow_mut()[self.0] {
+ false => {
+ panic!("double free at index {:?}", self.0)
+ }
+ ref mut d => *d = false
+ }
+
+ self.1.cur_ops.set(self.1.cur_ops.get()+1);
+
+ if self.1.cur_ops.get() == self.1.failing_op {
+ panic::panic_any(InjectedFailure);
+ }
+ }
+}
+
+fn dynamic_init(a: &Allocator, c: bool) {
+ let _x;
+ if c {
+ _x = Some(a.alloc());
+ }
+}
+
+fn dynamic_drop(a: &Allocator, c: bool) {
+ let x = a.alloc();
+ if c {
+ Some(x)
+ } else {
+ None
+ };
+}
+
+struct TwoPtrs<'a>(Ptr<'a>, #[allow(unused_tuple_struct_fields)] Ptr<'a>);
+fn struct_dynamic_drop(a: &Allocator, c0: bool, c1: bool, c: bool) {
+ for i in 0..2 {
+ let x;
+ let y;
+ if (c0 && i == 0) || (c1 && i == 1) {
+ x = (a.alloc(), a.alloc(), a.alloc());
+ y = TwoPtrs(a.alloc(), a.alloc());
+ if c {
+ drop(x.1);
+ drop(y.0);
+ }
+ }
+ }
+}
+
+fn field_assignment(a: &Allocator, c0: bool) {
+ let mut x = (TwoPtrs(a.alloc(), a.alloc()), a.alloc());
+
+ x.1 = a.alloc();
+ x.1 = a.alloc();
+
+ let f = (x.0).0;
+ if c0 {
+ (x.0).0 = f;
+ }
+}
+
+fn assignment2(a: &Allocator, c0: bool, c1: bool) {
+ let mut _v = a.alloc();
+ let mut _w = a.alloc();
+ if c0 {
+ drop(_v);
+ }
+ _v = _w;
+ if c1 {
+ _w = a.alloc();
+ }
+}
+
+fn assignment1(a: &Allocator, c0: bool) {
+ let mut _v = a.alloc();
+ let mut _w = a.alloc();
+ if c0 {
+ drop(_v);
+ }
+ _v = _w;
+}
+
+union Boxy<T> {
+ a: ManuallyDrop<T>,
+ b: ManuallyDrop<T>,
+}
+
+fn union1(a: &Allocator) {
+ unsafe {
+ let mut u = Boxy { a: ManuallyDrop::new(a.alloc()) };
+ *u.b = a.alloc(); // drops first alloc
+ drop(ManuallyDrop::into_inner(u.a));
+ }
+}
+
+fn array_simple(a: &Allocator) {
+ let _x = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn vec_simple(a: &Allocator) {
+ let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn generator(a: &Allocator, run_count: usize) {
+ assert!(run_count < 4);
+
+ let mut gen = || {
+ (a.alloc(),
+ yield a.alloc(),
+ a.alloc(),
+ yield a.alloc()
+ );
+ };
+ for _ in 0..run_count {
+ Pin::new(&mut gen).resume(());
+ }
+}
+
+fn mixed_drop_and_nondrop(a: &Allocator) {
+ // check that destructor panics handle drop
+ // and non-drop blocks in the same scope correctly.
+ //
+ // Surprisingly enough, this used to not work.
+ let (x, y, z);
+ x = a.alloc();
+ y = 5;
+ z = a.alloc();
+}
+
+#[allow(unreachable_code)]
+fn vec_unreachable(a: &Allocator) {
+ let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
+}
+
+fn slice_pattern_first(a: &Allocator) {
+ let[_x, ..] = [a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn slice_pattern_middle(a: &Allocator) {
+ let[_, _x, _] = [a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn slice_pattern_two(a: &Allocator) {
+ let[_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn slice_pattern_last(a: &Allocator) {
+ let[.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+}
+
+fn slice_pattern_one_of(a: &Allocator, i: usize) {
+ let array = [a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+ let _x = match i {
+ 0 => { let [a, ..] = array; a }
+ 1 => { let [_, a, ..] = array; a }
+ 2 => { let [_, _, a, _] = array; a }
+ 3 => { let [_, _, _, a] = array; a }
+ _ => panic!("unmatched"),
+ };
+}
+
+fn subslice_pattern_from_end(a: &Allocator, arg: bool) {
+ let a = [a.alloc(), a.alloc(), a.alloc()];
+ if arg {
+ let[.., _x, _] = a;
+ } else {
+ let[_, _y @ ..] = a;
+ }
+}
+
+fn subslice_pattern_from_end_with_drop(a: &Allocator, arg: bool, arg2: bool) {
+ let a = [a.alloc(), a.alloc(), a.alloc(), a.alloc(), a.alloc()];
+ if arg2 {
+ drop(a);
+ return;
+ }
+
+ if arg {
+ let[.., _x, _] = a;
+ } else {
+ let[_, _y @ ..] = a;
+ }
+}
+
+fn slice_pattern_reassign(a: &Allocator) {
+ let mut ar = [a.alloc(), a.alloc()];
+ let[_, _x] = ar;
+ ar = [a.alloc(), a.alloc()];
+ let[.., _y] = ar;
+}
+
+fn subslice_pattern_reassign(a: &Allocator) {
+ let mut ar = [a.alloc(), a.alloc(), a.alloc()];
+ let[_, _, _x] = ar;
+ ar = [a.alloc(), a.alloc(), a.alloc()];
+ let[_, _y @ ..] = ar;
+}
+
+fn index_field_mixed_ends(a: &Allocator) {
+ let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())];
+ let[(_x, _), ..] = ar;
+ let[(_, _y), _] = ar;
+ let[_, (_, _w)] = ar;
+ let[.., (_z, _)] = ar;
+}
+
+fn subslice_mixed_min_lengths(a: &Allocator, c: i32) {
+ let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())];
+ match c {
+ 0 => { let[_x, ..] = ar; }
+ 1 => { let[_x, _, ..] = ar; }
+ 2 => { let[_x, _] = ar; }
+ 3 => { let[(_x, _), _, ..] = ar; }
+ 4 => { let[.., (_x, _)] = ar; }
+ 5 => { let[.., (_x, _), _] = ar; }
+ 6 => { let [_y @ ..] = ar; }
+ _ => { let [_y @ .., _] = ar; }
+ }
+}
+
+fn bindings_after_at_dynamic_init_move(a: &Allocator, c: bool) {
+ let foo = if c { Some(a.alloc()) } else { None };
+ let _x;
+
+ if let bar @ Some(_) = foo {
+ _x = bar;
+ }
+}
+
+fn bindings_after_at_dynamic_init_ref(a: &Allocator, c: bool) {
+ let foo = if c { Some(a.alloc()) } else { None };
+ let _x;
+
+ if let bar @ Some(_baz) = &foo {
+ _x = bar;
+ }
+}
+
+fn bindings_after_at_dynamic_drop_move(a: &Allocator, c: bool) {
+ let foo = if c { Some(a.alloc()) } else { None };
+
+ if let bar @ Some(_) = foo {
+ bar
+ } else {
+ None
+ };
+}
+
+fn bindings_after_at_dynamic_drop_ref(a: &Allocator, c: bool) {
+ let foo = if c { Some(a.alloc()) } else { None };
+
+ if let bar @ Some(_baz) = &foo {
+ bar
+ } else {
+ &None
+ };
+}
+
+fn move_ref_pattern(a: &Allocator) {
+ let mut tup = (a.alloc(), a.alloc(), a.alloc(), a.alloc());
+ let (ref _a, ref mut _b, _c, mut _d) = tup;
+}
+
+fn panic_after_return(a: &Allocator) -> Ptr<'_> {
+ // Panic in the drop of `p` or `q` can leak
+ let exceptions = vec![8, 9];
+ a.alloc();
+ let p = a.alloc();
+ {
+ a.alloc();
+ let p = a.alloc();
+ // FIXME (#47949) We leak values when we panic in a destructor after
+ // evaluating an expression with `rustc_mir::build::Builder::into`.
+ a.alloc_leaked(exceptions)
+ }
+}
+
+fn panic_after_return_expr(a: &Allocator) -> Ptr<'_> {
+ // Panic in the drop of `p` or `q` can leak
+ let exceptions = vec![8, 9];
+ a.alloc();
+ let p = a.alloc();
+ {
+ a.alloc();
+ let q = a.alloc();
+ // FIXME (#47949)
+ return a.alloc_leaked(exceptions);
+ }
+}
+
+fn panic_after_init(a: &Allocator) {
+ // Panic in the drop of `r` can leak
+ let exceptions = vec![8];
+ a.alloc();
+ let p = a.alloc();
+ let q = {
+ a.alloc();
+ let r = a.alloc();
+ // FIXME (#47949)
+ a.alloc_leaked(exceptions)
+ };
+}
+
+fn panic_after_init_temp(a: &Allocator) {
+ // Panic in the drop of `r` can leak
+ let exceptions = vec![8];
+ a.alloc();
+ let p = a.alloc();
+ {
+ a.alloc();
+ let r = a.alloc();
+ // FIXME (#47949)
+ a.alloc_leaked(exceptions)
+ };
+}
+
+fn panic_after_init_by_loop(a: &Allocator) {
+ // Panic in the drop of `r` can leak
+ let exceptions = vec![8];
+ a.alloc();
+ let p = a.alloc();
+ let q = loop {
+ a.alloc();
+ let r = a.alloc();
+ // FIXME (#47949)
+ break a.alloc_leaked(exceptions);
+ };
+}
+
+fn run_test<F>(mut f: F)
+ where F: FnMut(&Allocator)
+{
+ let first_alloc = Allocator::new(usize::MAX);
+ f(&first_alloc);
+
+ for failing_op in 1..first_alloc.cur_ops.get()+1 {
+ let alloc = Allocator::new(failing_op);
+ let alloc = &alloc;
+ let f = panic::AssertUnwindSafe(&mut f);
+ let result = panic::catch_unwind(move || {
+ f.0(alloc);
+ });
+ match result {
+ Ok(..) => panic!("test executed {} ops but now {}",
+ first_alloc.cur_ops.get(), alloc.cur_ops.get()),
+ Err(e) => {
+ if e.downcast_ref::<InjectedFailure>().is_none() {
+ panic::resume_unwind(e);
+ }
+ }
+ }
+ }
+}
+
+fn run_test_nopanic<F>(mut f: F)
+ where F: FnMut(&Allocator)
+{
+ let first_alloc = Allocator::new(usize::MAX);
+ f(&first_alloc);
+}
+
+fn main() {
+ run_test(|a| dynamic_init(a, false));
+ run_test(|a| dynamic_init(a, true));
+ run_test(|a| dynamic_drop(a, false));
+ run_test(|a| dynamic_drop(a, true));
+
+ run_test(|a| assignment2(a, false, false));
+ run_test(|a| assignment2(a, false, true));
+ run_test(|a| assignment2(a, true, false));
+ run_test(|a| assignment2(a, true, true));
+
+ run_test(|a| assignment1(a, false));
+ run_test(|a| assignment1(a, true));
+
+ run_test(|a| array_simple(a));
+ run_test(|a| vec_simple(a));
+ run_test(|a| vec_unreachable(a));
+
+ run_test(|a| struct_dynamic_drop(a, false, false, false));
+ run_test(|a| struct_dynamic_drop(a, false, false, true));
+ run_test(|a| struct_dynamic_drop(a, false, true, false));
+ run_test(|a| struct_dynamic_drop(a, false, true, true));
+ run_test(|a| struct_dynamic_drop(a, true, false, false));
+ run_test(|a| struct_dynamic_drop(a, true, false, true));
+ run_test(|a| struct_dynamic_drop(a, true, true, false));
+ run_test(|a| struct_dynamic_drop(a, true, true, true));
+
+ run_test(|a| field_assignment(a, false));
+ run_test(|a| field_assignment(a, true));
+
+ run_test(|a| generator(a, 0));
+ run_test(|a| generator(a, 1));
+ run_test(|a| generator(a, 2));
+ run_test(|a| generator(a, 3));
+
+ run_test(|a| mixed_drop_and_nondrop(a));
+
+ run_test(|a| slice_pattern_first(a));
+ run_test(|a| slice_pattern_middle(a));
+ run_test(|a| slice_pattern_two(a));
+ run_test(|a| slice_pattern_last(a));
+ run_test(|a| slice_pattern_one_of(a, 0));
+ run_test(|a| slice_pattern_one_of(a, 1));
+ run_test(|a| slice_pattern_one_of(a, 2));
+ run_test(|a| slice_pattern_one_of(a, 3));
+
+ run_test(|a| subslice_pattern_from_end(a, true));
+ run_test(|a| subslice_pattern_from_end(a, false));
+ run_test(|a| subslice_pattern_from_end_with_drop(a, true, true));
+ run_test(|a| subslice_pattern_from_end_with_drop(a, true, false));
+ run_test(|a| subslice_pattern_from_end_with_drop(a, false, true));
+ run_test(|a| subslice_pattern_from_end_with_drop(a, false, false));
+ run_test(|a| slice_pattern_reassign(a));
+ run_test(|a| subslice_pattern_reassign(a));
+
+ run_test(|a| index_field_mixed_ends(a));
+ run_test(|a| subslice_mixed_min_lengths(a, 0));
+ run_test(|a| subslice_mixed_min_lengths(a, 1));
+ run_test(|a| subslice_mixed_min_lengths(a, 2));
+ run_test(|a| subslice_mixed_min_lengths(a, 3));
+ run_test(|a| subslice_mixed_min_lengths(a, 4));
+ run_test(|a| subslice_mixed_min_lengths(a, 5));
+ run_test(|a| subslice_mixed_min_lengths(a, 6));
+ run_test(|a| subslice_mixed_min_lengths(a, 7));
+
+ run_test(|a| move_ref_pattern(a));
+
+ run_test(|a| {
+ panic_after_return(a);
+ });
+ run_test(|a| {
+ panic_after_return_expr(a);
+ });
+ run_test(|a| panic_after_init(a));
+ run_test(|a| panic_after_init_temp(a));
+ run_test(|a| panic_after_init_by_loop(a));
+
+ run_test(|a| bindings_after_at_dynamic_init_move(a, true));
+ run_test(|a| bindings_after_at_dynamic_init_move(a, false));
+ run_test(|a| bindings_after_at_dynamic_init_ref(a, true));
+ run_test(|a| bindings_after_at_dynamic_init_ref(a, false));
+ run_test(|a| bindings_after_at_dynamic_drop_move(a, true));
+ run_test(|a| bindings_after_at_dynamic_drop_move(a, false));
+ run_test(|a| bindings_after_at_dynamic_drop_ref(a, true));
+ run_test(|a| bindings_after_at_dynamic_drop_ref(a, false));
+
+ run_test_nopanic(|a| union1(a));
+}
diff --git a/src/test/ui/drop/issue-10028.rs b/src/test/ui/drop/issue-10028.rs
new file mode 100644
index 000000000..1692470e8
--- /dev/null
+++ b/src/test/ui/drop/issue-10028.rs
@@ -0,0 +1,21 @@
+// run-pass
+#![allow(dead_code)]
+// aux-build:issue-10028.rs
+
+// pretty-expanded FIXME #23616
+
+extern crate issue_10028 as issue10028;
+
+use issue10028::ZeroLengthThingWithDestructor;
+
+struct Foo {
+ zero_length_thing: ZeroLengthThingWithDestructor
+}
+
+fn make_foo() -> Foo {
+ Foo { zero_length_thing: ZeroLengthThingWithDestructor::new() }
+}
+
+fn main() {
+ let _f:Foo = make_foo();
+}
diff --git a/src/test/ui/drop/issue-30018-nopanic.rs b/src/test/ui/drop/issue-30018-nopanic.rs
new file mode 100644
index 000000000..291bab273
--- /dev/null
+++ b/src/test/ui/drop/issue-30018-nopanic.rs
@@ -0,0 +1,103 @@
+// run-pass
+#![allow(unreachable_code)]
+// More thorough regression test for Issues #30018 and #30822. This
+// attempts to explore different ways that array element construction
+// (for both scratch arrays and non-scratch ones) interacts with
+// breaks in the control-flow, in terms of the order of evaluation of
+// the destructors (which may change; see RFC Issue 744) and the
+// number of times that the destructor evaluates for each value (which
+// should never exceed 1; this latter case is what #30822 is about).
+
+use std::cell::RefCell;
+
+struct D<'a>(&'a RefCell<Vec<i32>>, i32);
+
+impl<'a> Drop for D<'a> {
+ fn drop(&mut self) {
+ println!("Dropping D({})", self.1);
+ (self.0).borrow_mut().push(self.1);
+ }
+}
+
+fn main() {
+ println!("Start");
+ break_during_elem();
+ break_after_whole();
+ println!("Finis");
+}
+
+fn break_during_elem() {
+ let log = &RefCell::new(Vec::new());
+
+ // CASE 1: Fixed-size array itself is stored in _r slot.
+ loop {
+ let _r = [D(log, 10),
+ D(log, 11),
+ { D(log, 12); break; },
+ D(log, 13)];
+ }
+ assert_eq!(&log.borrow()[..], &[12, 11, 10]);
+ log.borrow_mut().clear();
+
+ // CASE 2: Slice (borrow of array) is stored in _r slot.
+ // This is the case that is actually being reported in #30018.
+ loop {
+ let _r = &[D(log, 20),
+ D(log, 21),
+ { D(log, 22); break; },
+ D(log, 23)];
+ }
+ assert_eq!(&log.borrow()[..], &[22, 21, 20]);
+ log.borrow_mut().clear();
+
+ // CASE 3: (Borrow of) slice-index of array is stored in _r slot.
+ loop {
+ let _r = &[D(log, 30),
+ D(log, 31),
+ { D(log, 32); break; },
+ D(log, 33)][..];
+ }
+ assert_eq!(&log.borrow()[..], &[32, 31, 30]);
+ log.borrow_mut().clear();
+}
+
+// The purpose of these functions is to test what happens when we
+// break out of the loop after an array has been constructed in its entirety.
+//
+// It is meant to act as proof that we still need to continue
+// scheduling the destruction of an array even after we've scheduled
+// drop for its elements during construction; the latter is tested by
+// `fn break_during_elem()`.
+fn break_after_whole() {
+ let log = &RefCell::new(Vec::new());
+
+ // CASE 1: Fixed-size array itself is stored in _r slot.
+ loop {
+ let _r = [D(log, 10),
+ D(log, 11),
+ D(log, 12)];
+ break;
+ }
+ assert_eq!(&log.borrow()[..], &[10, 11, 12]);
+ log.borrow_mut().clear();
+
+ // CASE 2: Slice (borrow of array) is stored in _r slot.
+ loop {
+ let _r = &[D(log, 20),
+ D(log, 21),
+ D(log, 22)];
+ break;
+ }
+ assert_eq!(&log.borrow()[..], &[20, 21, 22]);
+ log.borrow_mut().clear();
+
+ // CASE 3: (Borrow of) slice-index of array is stored in _r slot.
+ loop {
+ let _r = &[D(log, 30),
+ D(log, 31),
+ D(log, 32)][..];
+ break;
+ }
+ assert_eq!(&log.borrow()[..], &[30, 31, 32]);
+ log.borrow_mut().clear();
+}
diff --git a/src/test/ui/drop/issue-35546.rs b/src/test/ui/drop/issue-35546.rs
new file mode 100644
index 000000000..004679a62
--- /dev/null
+++ b/src/test/ui/drop/issue-35546.rs
@@ -0,0 +1,20 @@
+// build-pass
+#![allow(dead_code)]
+// Regression test for #35546. Check that we are able to codegen
+// this. Before we had problems because of the drop glue signature
+// around dropping a trait object (specifically, when dropping the
+// `value` field of `Node<Send>`).
+
+struct Node<T: ?Sized + Send> {
+ next: Option<Box<Node<dyn Send>>>,
+ value: T,
+}
+
+fn clear(head: &mut Option<Box<Node<dyn Send>>>) {
+ match head.take() {
+ Some(node) => *head = node.next,
+ None => (),
+ }
+}
+
+fn main() {}
diff --git a/src/test/ui/drop/issue-90752-raw-ptr-shenanigans.rs b/src/test/ui/drop/issue-90752-raw-ptr-shenanigans.rs
new file mode 100644
index 000000000..4e67b3594
--- /dev/null
+++ b/src/test/ui/drop/issue-90752-raw-ptr-shenanigans.rs
@@ -0,0 +1,41 @@
+// run-pass
+
+use std::cell::RefCell;
+
+struct S<'a>(i32, &'a RefCell<Vec<i32>>);
+
+impl<'a> Drop for S<'a> {
+ fn drop(&mut self) {
+ self.1.borrow_mut().push(self.0);
+ }
+}
+
+fn test(drops: &RefCell<Vec<i32>>) {
+ let mut foo = None;
+ let pfoo: *mut _ = &mut foo;
+
+ match foo {
+ None => (),
+ _ => return,
+ }
+
+    // Both S(0) and S(1) should be dropped, but S(1) is leaked.
+ unsafe { *pfoo = Some((S(0, drops), S(1, drops))); }
+
+ match foo {
+ Some((_x, _)) => {}
+ _ => {}
+ }
+}
+
+fn main() {
+ let drops = RefCell::new(Vec::new());
+ test(&drops);
+
+ // Ideally, we want this...
+ //assert_eq!(*drops.borrow(), &[0, 1]);
+
+ // But the delayed access through the raw pointer confuses drop elaboration,
+ // causing S(1) to be leaked.
+ assert_eq!(*drops.borrow(), &[0]);
+}
diff --git a/src/test/ui/drop/issue-90752.rs b/src/test/ui/drop/issue-90752.rs
new file mode 100644
index 000000000..4395e45e7
--- /dev/null
+++ b/src/test/ui/drop/issue-90752.rs
@@ -0,0 +1,32 @@
+// run-pass
+
+use std::cell::RefCell;
+
+struct S<'a>(i32, &'a RefCell<Vec<i32>>);
+
+impl<'a> Drop for S<'a> {
+ fn drop(&mut self) {
+ self.1.borrow_mut().push(self.0);
+ }
+}
+
+fn test(drops: &RefCell<Vec<i32>>) {
+ let mut foo = None;
+ match foo {
+ None => (),
+ _ => return,
+ }
+
+ *(&mut foo) = Some((S(0, drops), S(1, drops))); // Both S(0) and S(1) should be dropped
+
+ match foo {
+ Some((_x, _)) => {}
+ _ => {}
+ }
+}
+
+fn main() {
+ let drops = RefCell::new(Vec::new());
+ test(&drops);
+ assert_eq!(*drops.borrow(), &[0, 1]);
+}
diff --git a/src/test/ui/drop/no-drop-flag-size.rs b/src/test/ui/drop/no-drop-flag-size.rs
new file mode 100644
index 000000000..103e70ef6
--- /dev/null
+++ b/src/test/ui/drop/no-drop-flag-size.rs
@@ -0,0 +1,15 @@
+// run-pass
+#![allow(dead_code)]
+use std::mem::size_of;
+
+struct Test<T> {
+ a: T
+}
+
+impl<T> Drop for Test<T> {
+ fn drop(&mut self) { }
+}
+
+pub fn main() {
+ assert_eq!(size_of::<isize>(), size_of::<Test<isize>>());
+}
diff --git a/src/test/ui/drop/nondrop-cycle.rs b/src/test/ui/drop/nondrop-cycle.rs
new file mode 100644
index 000000000..29070f917
--- /dev/null
+++ b/src/test/ui/drop/nondrop-cycle.rs
@@ -0,0 +1,31 @@
+// run-pass
+// pretty-expanded FIXME #23616
+
+use std::cell::Cell;
+
+struct C<'a> {
+ p: Cell<Option<&'a C<'a>>>,
+}
+
+impl<'a> C<'a> {
+ fn new() -> C<'a> { C { p: Cell::new(None) } }
+}
+
+fn f1() {
+ let (c1, c2) = (C::new(), C::new());
+ c1.p.set(Some(&c2));
+ c2.p.set(Some(&c1));
+}
+
+fn f2() {
+ let (c1, c2);
+ c1 = C::new();
+ c2 = C::new();
+ c1.p.set(Some(&c2));
+ c2.p.set(Some(&c1));
+}
+
+fn main() {
+ f1();
+ f2();
+}
diff --git a/src/test/ui/drop/repeat-drop-2.rs b/src/test/ui/drop/repeat-drop-2.rs
new file mode 100644
index 000000000..59d5ef202
--- /dev/null
+++ b/src/test/ui/drop/repeat-drop-2.rs
@@ -0,0 +1,15 @@
+fn borrowck_catch() {
+ let foo = String::new();
+ let _bar = foo;
+ let _baz = [foo; 0]; //~ ERROR use of moved value: `foo` [E0382]
+}
+
+const _: [String; 0] = [String::new(); 0];
+//~^ ERROR destructors cannot be evaluated at compile-time [E0493]
+
+fn must_be_init() {
+ let x: u8;
+ let _ = [x; 0]; //~ ERROR E0381
+}
+
+fn main() {}
diff --git a/src/test/ui/drop/repeat-drop-2.stderr b/src/test/ui/drop/repeat-drop-2.stderr
new file mode 100644
index 000000000..48fa2bfa9
--- /dev/null
+++ b/src/test/ui/drop/repeat-drop-2.stderr
@@ -0,0 +1,31 @@
+error[E0382]: use of moved value: `foo`
+ --> $DIR/repeat-drop-2.rs:4:17
+ |
+LL | let foo = String::new();
+ | --- move occurs because `foo` has type `String`, which does not implement the `Copy` trait
+LL | let _bar = foo;
+ | --- value moved here
+LL | let _baz = [foo; 0];
+ | ^^^ value used here after move
+
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/repeat-drop-2.rs:7:25
+ |
+LL | const _: [String; 0] = [String::new(); 0];
+ | -^^^^^^^^^^^^^----
+ | ||
+ | |constants cannot evaluate destructors
+ | value is dropped here
+
+error[E0381]: used binding `x` isn't initialized
+ --> $DIR/repeat-drop-2.rs:12:14
+ |
+LL | let x: u8;
+ | - binding declared here but left uninitialized
+LL | let _ = [x; 0];
+ | ^ `x` used here but it isn't initialized
+
+error: aborting due to 3 previous errors
+
+Some errors have detailed explanations: E0381, E0382, E0493.
+For more information about an error, try `rustc --explain E0381`.
diff --git a/src/test/ui/drop/repeat-drop.rs b/src/test/ui/drop/repeat-drop.rs
new file mode 100644
index 000000000..a43612e5d
--- /dev/null
+++ b/src/test/ui/drop/repeat-drop.rs
@@ -0,0 +1,121 @@
+// run-pass
+// needs-unwind
+// ignore-wasm32-bare no unwinding panic
+// ignore-avr no unwinding panic
+// ignore-nvptx64 no unwinding panic
+
+static mut CHECK: usize = 0;
+
+struct DropChecker(usize);
+
+impl Drop for DropChecker {
+ fn drop(&mut self) {
+ unsafe {
+ if CHECK != self.0 - 1 {
+ panic!("Found {}, should have found {}", CHECK, self.0 - 1);
+ }
+ CHECK = self.0;
+ }
+ }
+}
+
+macro_rules! check_drops {
+ ($l:literal) => {
+ unsafe { assert_eq!(CHECK, $l) }
+ };
+}
+
+struct DropPanic;
+
+impl Drop for DropPanic {
+ fn drop(&mut self) {
+ panic!()
+ }
+}
+
+fn value_zero() {
+ unsafe { CHECK = 0 };
+ let foo = DropChecker(1);
+ let v: [DropChecker; 0] = [foo; 0];
+ check_drops!(1);
+ std::mem::drop(v);
+ check_drops!(1);
+}
+
+fn value_one() {
+ unsafe { CHECK = 0 };
+ let foo = DropChecker(1);
+ let v: [DropChecker; 1] = [foo; 1];
+ check_drops!(0);
+ std::mem::drop(v);
+ check_drops!(1);
+}
+
+const DROP_CHECKER: DropChecker = DropChecker(1);
+
+fn const_zero() {
+ unsafe { CHECK = 0 };
+ let v: [DropChecker; 0] = [DROP_CHECKER; 0];
+ check_drops!(0);
+ std::mem::drop(v);
+ check_drops!(0);
+}
+
+fn const_one() {
+ unsafe { CHECK = 0 };
+ let v: [DropChecker; 1] = [DROP_CHECKER; 1];
+ check_drops!(0);
+ std::mem::drop(v);
+ check_drops!(1);
+}
+
+fn const_generic_zero<const N: usize>() {
+ unsafe { CHECK = 0 };
+ let v: [DropChecker; N] = [DROP_CHECKER; N];
+ check_drops!(0);
+ std::mem::drop(v);
+ check_drops!(0);
+}
+
+fn const_generic_one<const N: usize>() {
+ unsafe { CHECK = 0 };
+ let v: [DropChecker; N] = [DROP_CHECKER; N];
+ check_drops!(0);
+ std::mem::drop(v);
+ check_drops!(1);
+}
+
+// Make sure that things are allowed to promote as expected
+
+fn allow_promote() {
+ unsafe { CHECK = 0 };
+ let foo = DropChecker(1);
+ let v: &'static [DropChecker; 0] = &[foo; 0];
+ check_drops!(1);
+ std::mem::drop(v);
+ check_drops!(1);
+}
+
+// Verify that unwinding in the drop causes the right things to drop in the right order
+fn on_unwind() {
+ unsafe { CHECK = 0 };
+ std::panic::catch_unwind(|| {
+ let panic = DropPanic;
+ let _local = DropChecker(2);
+ let _v = (DropChecker(1), [panic; 0]);
+ std::process::abort();
+ })
+ .unwrap_err();
+ check_drops!(2);
+}
+
+fn main() {
+ value_zero();
+ value_one();
+ const_zero();
+ const_one();
+ const_generic_zero::<0>();
+ const_generic_one::<1>();
+ allow_promote();
+ on_unwind();
+}
diff --git a/src/test/ui/drop/terminate-in-initializer.rs b/src/test/ui/drop/terminate-in-initializer.rs
new file mode 100644
index 000000000..66f267aa7
--- /dev/null
+++ b/src/test/ui/drop/terminate-in-initializer.rs
@@ -0,0 +1,34 @@
+// run-pass
+// needs-unwind
+// ignore-emscripten no threads support
+
+// Issue #787
+// Don't try to clean up uninitialized locals
+
+
+use std::thread;
+
+fn test_break() { loop { let _x: Box<isize> = break; } }
+
+fn test_cont() { let mut i = 0; while i < 1 { i += 1; let _x: Box<isize> = continue; } }
+
+fn test_ret() { let _x: Box<isize> = return; }
+
+fn test_panic() {
+ fn f() { let _x: Box<isize> = panic!(); }
+ thread::spawn(move|| f() ).join().unwrap_err();
+}
+
+fn test_panic_indirect() {
+ fn f() -> ! { panic!(); }
+ fn g() { let _x: Box<isize> = f(); }
+ thread::spawn(move|| g() ).join().unwrap_err();
+}
+
+pub fn main() {
+ test_break();
+ test_cont();
+ test_ret();
+ test_panic();
+ test_panic_indirect();
+}
diff --git a/src/test/ui/drop/use_inline_dtor.rs b/src/test/ui/drop/use_inline_dtor.rs
new file mode 100644
index 000000000..ac916de46
--- /dev/null
+++ b/src/test/ui/drop/use_inline_dtor.rs
@@ -0,0 +1,10 @@
+// run-pass
+// aux-build:inline_dtor.rs
+
+// pretty-expanded FIXME #23616
+
+extern crate inline_dtor;
+
+pub fn main() {
+ let _x = inline_dtor::Foo;
+}