path: root/drivers/infiniband/sw/rxe/rxe_task.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>

#include "rxe.h"

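/* run the task's work function directly in the calling context until
 * it returns non-zero, bypassing the tasklet and the state machine;
 * since it does not take the state lock it must not race with
 * rxe_do_task() on the same task
 */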
int __rxe_do_task(struct rxe_task *task)
{
	int ret;

	while ((ret = task->func(task->arg)) == 0)
		;

	task->ret = ret;

	return ret;
}

/*
 * This locking guards against a race where a second caller finds the
 * task already running but checks the state just after the running
 * task's last call to func; the ARMED state below ensures func is
 * called at least once more in that case.
 */
void rxe_do_task(struct tasklet_struct *t)
{
	int cont;
	int ret;
	struct rxe_task *task = from_tasklet(task, t, tasklet);
	unsigned int iterations = RXE_MAX_ITERATIONS;

	spin_lock_bh(&task->state_lock);
	switch (task->state) {
	case TASK_STATE_START:
		task->state = TASK_STATE_BUSY;
		spin_unlock_bh(&task->state_lock);
		break;

	case TASK_STATE_BUSY:
		task->state = TASK_STATE_ARMED;
		fallthrough;
	case TASK_STATE_ARMED:
		spin_unlock_bh(&task->state_lock);
		return;

	default:
		spin_unlock_bh(&task->state_lock);
		pr_warn("%s failed with bad state %d\n", __func__, task->state);
		return;
	}

	do {
		cont = 0;
		ret = task->func(task->arg);

		spin_lock_bh(&task->state_lock);
		switch (task->state) {
		case TASK_STATE_BUSY:
			if (ret) {
				task->state = TASK_STATE_START;
			} else if (iterations--) {
				cont = 1;
			} else {
				/* reschedule the tasklet and exit
				 * the loop to give up the cpu
				 */
				tasklet_schedule(&task->tasklet);
				task->state = TASK_STATE_START;
			}
			break;

		/* someone tried to run the task since the last time we called
		 * func, so we will call one more time regardless of the
		 * return value
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

		default:
			pr_warn("%s failed with bad state %d\n", __func__,
				task->state);
		}
		spin_unlock_bh(&task->state_lock);
	} while (cont);

	task->ret = ret;
}

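/* initialize a task before first use; func will be called with arg
 * each time the task runs and should return 0 to be called again or
 * non-zero when it has no more work to do
 */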
int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
{
	task->arg	= arg;
	task->func	= func;
	task->destroyed	= false;

	tasklet_setup(&task->tasklet, rxe_do_task);

	task->state = TASK_STATE_START;
	spin_lock_init(&task->state_lock);

	return 0;
}

void rxe_cleanup_task(struct rxe_task *task)
{
	bool idle;

	/*
	 * Mark the task, then wait for it to finish. It might be
	 * running in a non-tasklet (direct call) context.
	 */
	task->destroyed = true;

	do {
		spin_lock_bh(&task->state_lock);
		idle = (task->state == TASK_STATE_START);
		spin_unlock_bh(&task->state_lock);
	} while (!idle);

	tasklet_kill(&task->tasklet);
}

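/* run the task immediately in the calling context unless it has
 * already been destroyed
 */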
void rxe_run_task(struct rxe_task *task)
{
	if (task->destroyed)
		return;

	rxe_do_task(&task->tasklet);
}

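/* defer the task to run later in tasklet (softirq) context */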
void rxe_sched_task(struct rxe_task *task)
{
	if (task->destroyed)
		return;

	tasklet_schedule(&task->tasklet);
}

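/* temporarily prevent the task's tasklet from running */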
void rxe_disable_task(struct rxe_task *task)
{
	tasklet_disable(&task->tasklet);
}

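/* allow the task's tasklet to run again after rxe_disable_task() */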
void rxe_enable_task(struct rxe_task *task)
{
	tasklet_enable(&task->tasklet);
}
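
/* Illustrative sketch of typical use (not part of the driver; my_obj,
 * my_work() and process_one() are hypothetical names).  The work
 * function returns 0 while it wants to be called again and non-zero,
 * e.g. -EAGAIN, once there is nothing left to do.  A task is set up
 * with rxe_init_task(), driven either inline with rxe_run_task() or
 * deferred to softirq context with rxe_sched_task(), and torn down
 * with rxe_cleanup_task() before its owner is freed:
 *
 *	static int my_work(void *arg)
 *	{
 *		struct my_obj *obj = arg;
 *
 *		return process_one(obj) ? 0 : -EAGAIN;
 *	}
 *
 *	rxe_init_task(&obj->task, obj, my_work);
 *	rxe_run_task(&obj->task);
 *	...
 *	rxe_sched_task(&obj->task);
 *	...
 *	rxe_cleanup_task(&obj->task);
 */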