// Copyright (C) 2016-2018 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//[ self_evaluation
#include <boost/yap/expression.hpp>

#include <boost/optional.hpp>
#include <boost/hana/fold.hpp>
#include <boost/hana/maximum.hpp>

#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>


// A super-basic matrix type, and a few associated operations.
struct matrix
{
    matrix() : values_(), rows_(0), cols_(0) {}

    matrix(int rows, int cols) : values_(rows * cols), rows_(rows), cols_(cols)
    {
        assert(0 < rows);
        assert(0 < cols);
    }

    int rows() const { return rows_; }
    int cols() const { return cols_; }

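    // Element access; values are stored in row-major order.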
    double operator()(int r, int c) const
    { return values_[r * cols_ + c]; }
    double & operator()(int r, int c)
    { return values_[r * cols_ + c]; }

private:
    std::vector<double> values_;
    int rows_;
    int cols_;
};

matrix operator*(matrix const & lhs, double x)
{
    matrix retval = lhs;
    for (int i = 0; i < retval.rows(); ++i) {
        for (int j = 0; j < retval.cols(); ++j) {
            retval(i, j) *= x;
        }
    }
    return retval;
}
matrix operator*(double x, matrix const & rhs) { return rhs * x; }

matrix operator+(matrix const & lhs, matrix const & rhs)
{
    assert(lhs.rows() == rhs.rows());
    assert(lhs.cols() == rhs.cols());
    matrix retval = lhs;
    for (int i = 0; i < retval.rows(); ++i) {
        for (int j = 0; j < retval.cols(); ++j) {
            retval(i, j) += rhs(i, j);
        }
    }
    return retval;
}

// daxpy() means Double-precision AX Plus Y.  This crazy name comes from BLAS.
// It is more efficient than a naive implementation, because it does not
// create temporaries.  The convention of using Y as an out-parameter comes
// from FORTRAN BLAS.
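// For example, daxpy(3.0, x, y) updates y in place to y + 3.0 * x without
// allocating a temporary matrix.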
matrix & daxpy(double a, matrix const & x, matrix & y)
{
    assert(x.rows() == y.rows());
    assert(x.cols() == y.cols());
    for (int i = 0; i < y.rows(); ++i) {
        for (int j = 0; j < y.cols(); ++j) {
            y(i, j) += a * x(i, j);
        }
    }
    return y;
}

template <boost::yap::expr_kind Kind, typename Tuple>
struct self_evaluating_expr;

template <boost::yap::expr_kind Kind, typename Tuple>
auto evaluate_matrix_expr(self_evaluating_expr<Kind, Tuple> const & expr);

// This is the primary template for our expression template.  If you assign a
// self_evaluating_expr to a matrix, its conversion operator transforms and
// evaluates the expression with a call to evaluate_matrix_expr().
template <boost::yap::expr_kind Kind, typename Tuple>
struct self_evaluating_expr
{
    operator auto() const;

    static const boost::yap::expr_kind kind = Kind;

    Tuple elements;
};

// This is a specialization of our expression template for assignment
// expressions.  The destructor transforms and evaluates via a call to
// evaluate_matrix_expr(), and then assigns the result to the variable on the
// left side of the assignment.
//
// In a production implementation, you'd need to have specializations for
// plus_assign, minus_assign, etc.
template <typename Tuple>
struct self_evaluating_expr<boost::yap::expr_kind::assign, Tuple>
{
    ~self_evaluating_expr();

    static const boost::yap::expr_kind kind = boost::yap::expr_kind::assign;

    Tuple elements;
};

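// A yap transform object.  boost::yap::transform() matches each node of an
// expression against the call operators below via expr_tag dispatch; nodes
// that match are replaced by whatever the call operator returns, and nodes
// that do not match are rebuilt from their (recursively transformed) children.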
struct use_daxpy
{
    // A plus-expression, which may be of the form double * matrix + matrix,
    // or may be something else.  Since our daxpy() above requires a mutable
    // "y", we only need to match a mutable lvalue matrix reference here.
    template <typename Tuple>
    auto operator()(
        boost::yap::expr_tag<boost::yap::expr_kind::plus>,
        self_evaluating_expr<boost::yap::expr_kind::multiplies, Tuple> const & expr,
        matrix & m)
    {
        // Here, we transform the left-hand side into a pair if it's the
        // double * matrix operation we're looking for.  Otherwise, we just
        // get a copy of the left side expression.
        //
        // Note that this is a bit of a cheat, done for clarity.  If we pass a
        // larger expression that happens to contain a double * matrix
// subexpression, that subexpression will be transformed into a pair!
        // In production code, this transform should probably only be
        // performed on an expression with all terminal members.
        auto lhs = boost::yap::transform(
            expr,
            [](boost::yap::expr_tag<boost::yap::expr_kind::multiplies>,
               double d,
               matrix const & m) {
                return std::pair<double, matrix const &>(d, m);
            });

        // If we got back a copy of expr above, just re-construct the
// expression this function matches; in other words, do not effectively
        // transform anything.  Otherwise, replace the expression matched by
        // this function with a call to daxpy().
        if constexpr (boost::yap::is_expr<decltype(lhs)>::value) {
            return expr + m;
        } else {
            return boost::yap::make_terminal(daxpy)(lhs.first, lhs.second, m);
        }
    }
};


// This is the heart of what self_evaluating_expr does.  If we had other
// optimizations/transformations we wanted to do, we'd put them in this
// function, either before or after the use_daxpy transformation.
template <boost::yap::expr_kind Kind, typename Tuple>
auto evaluate_matrix_expr(self_evaluating_expr<Kind, Tuple> const & expr)
{
    auto daxpy_form = boost::yap::transform(expr, use_daxpy{});
    return boost::yap::evaluate(daxpy_form);
}

template<boost::yap::expr_kind Kind, typename Tuple>
self_evaluating_expr<Kind, Tuple>::operator auto() const
{
    return evaluate_matrix_expr(*this);
}

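// In an assign-expression, elements[0_c] is the left-hand terminal and
// elements[1_c] is the right-hand side.  Evaluate the right side (applying
// the daxpy transform), then write the result into the matrix held by the
// left-hand terminal.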
template<typename Tuple>
self_evaluating_expr<boost::yap::expr_kind::assign, Tuple>::
    ~self_evaluating_expr()
{
    using namespace boost::hana::literals;
    boost::yap::evaluate(elements[0_c]) = evaluate_matrix_expr(elements[1_c]);
}

// In order to define the = operator with the semantics we want, it's
// convenient to derive a terminal type from a terminal instantiation of
// self_evaluating_expr.  Note that we could have written a template
// specialization here instead -- either one would work -- but that would of
// course have required more typing.
struct self_evaluating :
    self_evaluating_expr<
        boost::yap::expr_kind::terminal,
        boost::hana::tuple<matrix>
    >
{
    self_evaluating() {}

    explicit self_evaluating(matrix m)
    { elements = boost::hana::tuple<matrix>(std::move(m)); }

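    // Defines operator=() on self_evaluating; rather than assigning
    // immediately, it builds a yap assign-expression, whose destructor above
    // performs the transform and the actual assignment.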
    BOOST_YAP_USER_ASSIGN_OPERATOR(self_evaluating_expr, ::self_evaluating_expr);
};

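// These macros define non-member operator+, operator-, and operator*
// overloads that build self_evaluating_expr expression trees when at least
// one operand is a self_evaluating_expr (or something derived from one, like
// self_evaluating above).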
BOOST_YAP_USER_BINARY_OPERATOR(plus, self_evaluating_expr, self_evaluating_expr)
BOOST_YAP_USER_BINARY_OPERATOR(minus, self_evaluating_expr, self_evaluating_expr)
BOOST_YAP_USER_BINARY_OPERATOR(multiplies, self_evaluating_expr, self_evaluating_expr)


int main()
{
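    // A 2x2 identity matrix, used as the initial value of each terminal below.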
    matrix identity(2, 2);
    identity(0, 0) = 1.0;
    identity(1, 1) = 1.0;

    // These are YAP-ified terminal expressions.
    self_evaluating m1(identity);
    self_evaluating m2(identity);
    self_evaluating m3(identity);

    // This transforms the YAP expression to use daxpy(), so it creates no
    // temporaries.  The transform happens in the destructor of the
    // assignment-expression specialization of self_evaluating_expr.
    m1 = 3.0 * m2 + m3;

    // Same as above, except that it uses the matrix conversion operator on
    // the self_evaluating_expr primary template, because here we're assigning
    // a YAP expression to a non-YAP-ified matrix.
    matrix m_result_1 = 3.0 * m2 + m3;

    // Creates temporaries and does not use daxpy(), because the A * X + Y
    // pattern does not occur within the expression.
    matrix m_result_2 = 3.0 * m2;

    return 0;
}
//]