summaryrefslogtreecommitdiffstats
path: root/debian/perl-framework/t/apache/leaks.t
blob: 99ce600943f8663d1acf950a70c9833c5eba6825 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
use strict;
use warnings FATAL => 'all';

use Apache::Test;
use Apache::TestRequest;
use Apache::TestUtil;

# Endpoint served by mod_memory_track; every request in this test hits it.
my $url = '/memory_track';

# Warm-up request count, then measured-iteration count.
my $init_iters = 2000;
my $iters      = 500;

# One test per warm-up request, plus two per measured iteration
# (status check + memory check).
my $num_tests = $init_iters + 2 * $iters;
plan tests => $num_tests;

### this doesn't seem sufficient to force all requests over a single
### persistent connection any more, is there a better trick?
Apache::TestRequest::user_agent(keep_alive => 1);
Apache::TestRequest::scheme('http');

# Probe request: decides below whether the module is active at all.
my $r = GET $url;

if ($r->code != 200) {
    # these tests will be skipped for async MPMs or with an APR not
    # built with --enable-pool-debug.
    skip "mod_memory_track not activated" foreach (1..$num_tests);
}
else {
    my $cid = -1;   # conn_rec id being tracked (-1 until first response seen)
    my $mem;        # baseline c->pool byte count for that connection

    # initial iterations should get workers to steady-state memory use.
    foreach (1..$init_iters) {
        ok t_cmp(GET_RC($url), 200, "200 response");
    }

    # now test whether c->pool memory is increasing for further
    # requests on a given conn_rec (matched by id)... could track them
    # all with a bit more effort.
    foreach (1..$iters) {
        $r = GET $url;

        print "# iter $_\n";

        ok t_cmp($r->code, 200, "got response");

        # Body is expected to be "key,conn_id,bytes".
        my $content = $r->content;
        chomp $content;
        my ($key, $id, $bytes) = split ',', $content;

        # Guard against a malformed body before interpolating or
        # numerically comparing the fields: this file runs with
        # "use warnings FATAL => 'all'", so an undef here would
        # abort the whole run instead of failing a single test.
        if (!defined $key || !defined $id || !defined $bytes) {
            print "# error: unexpected response body: '$content'\n";
            ok 0;
            next;
        }

        print "# $key, $id, $bytes\n";

        if ($cid == -1) {
            # First measured response: record the connection id and
            # its baseline memory use.
            $cid = $id;
            $mem = $bytes;
            ok 1;
        }
        elsif ($cid != $id) {
            # Request went over a different connection; a memory
            # comparison against the tracked conn_rec is meaningless.
            skip "using wrong connection";
        }
        elsif ($bytes > $mem) {
            print "# error: pool memory increased from $mem to $bytes!\n";
            ok 0;
        }
        else {
            ok 1;
        }
    }
}