cascade_lockf.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms
 * of the Common Development and Distribution License
 * (the "License"). You may not use this file except
 * in compliance with the License.
 *
 * You can obtain a copy of the license at
 * src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL
 * HEADER in each file and include the License file at
 * usr/src/OPENSOLARIS.LICENSE. If applicable,
 * add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your
 * own identifying information: Portions Copyright [yyyy]
 * [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The "cascade" test case is a multiprocess/multithread batten-passing model
 * using lock primitives alone for synchronisation. Threads are arranged in a
 * ring. Each thread has two locks of its own on which it blocks, and is able
 * to manipulate the two locks belonging to the thread which follows it in the
 * ring.
 *
 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
 * options. With nthreads == 1 (the default) the uncontended case can be timed.
 *
 * The main logic is generic and allows any simple blocking API to be tested.
 * The API-specific component is clearly indicated.
 */
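
/*
 * For illustration only (derived from benchmark_initbatch() below): with
 * nthreads == 3, thread n owns and blocks on lock files 2n and 2n+1, and
 * manipulates its neighbour's pair to pass the batten along the ring.
 * The last thread's connection back to thread 0 is crossed over, i.e.
 * them0/them1 are swapped:
 *
 *      thread    owns (us0, us1)    touches (them0, them1)
 *        0           0, 1                 2, 3
 *        1           2, 3                 4, 5
 *        2           4, 5                 1, 0   (cross-over)
 */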
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include "libmicro.h"

typedef struct {
        int     ts_once;
        int     ts_id;
        int     ts_us0;         /* our lock indices */
        int     ts_us1;
        int     ts_them0;       /* their lock indices */
        int     ts_them1;
} tsd_t;

static int      nthreads;

/*
 * API-specific code BEGINS here
 */

#define DEFD    "/tmp"

static char     *optd = DEFD;
static int      nfiles;
static int      *files;

int
benchmark_init()
{
        lm_tsdsize = sizeof (tsd_t);

        (void) sprintf(lm_optstr, "d:");

        lm_defN = "cscd_lockf";

        (void) sprintf(lm_usage,
            "       [-d directory for temp files (default %s)]\n"
            "notes: thread cascade using lockf file locking\n",
            DEFD);

        return (0);
}

int
benchmark_optswitch(int opt, char *optarg)
{
        switch (opt) {
        case 'd':
                optd = optarg;
                break;
        default:
                return (-1);
        }

        return (0);
}

int
benchmark_initrun()
{
        int     i;
        int     errors = 0;
        char    fname[1024];

        nthreads = lm_optP * lm_optT;
        nfiles = nthreads * 2;
        (void) setfdlimit(nfiles + 10);

        files = (int *)malloc(nfiles * sizeof (int));
        if (files == NULL) {
                return (1);
        }

        (void) sprintf(fname, "%s/cascade.%ld", optd, (long)getpid());

        for (i = 0; i < nfiles; i++) {
                files[i] = open(fname, O_CREAT | O_TRUNC | O_RDWR, 0600);
                if (files[i] == -1) {
                        errors++;
                }
                /* the open fd keeps the file usable; unlinking it means */
                /* the lock files vanish automatically when we exit */
                if (unlink(fname)) {
                        errors++;
                }
        }

        return (errors);
}

/* acquire the lock; lockf(F_LOCK) sleeps until the current holder releases */
int
block(int index)
{
        return (lockf(files[index], F_LOCK, 0) == -1);
}

/* release the lock so that the blocked neighbour can proceed */
int
unblock(int index)
{
        return (lockf(files[index], F_ULOCK, 0) == -1);
}
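
/*
 * Illustrative sketch, not part of the benchmark (kept under #if 0 so it
 * is never compiled): a minimal two-process batten pass over a single
 * lockf-locked file, showing the blocking behaviour that block() and
 * unblock() above rely on. The temp-file template and the one-second
 * sleep are assumptions made for the example only.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
        char    tmpl[] = "/tmp/lockf_demo_XXXXXX";      /* example lock file */
        int     fd = mkstemp(tmpl);

        if (fd == -1) {
                perror("mkstemp");
                return (1);
        }
        (void) unlink(tmpl);    /* same open-then-unlink pattern as above */

        /* parent takes the lock before forking, so the child must wait */
        if (lockf(fd, F_LOCK, 0) == -1) {
                perror("lockf");
                return (1);
        }

        switch (fork()) {
        case -1:
                perror("fork");
                return (1);
        case 0:
                /* child: F_LOCK blocks here until the parent unlocks */
                (void) lockf(fd, F_LOCK, 0);
                (void) printf("child got the batten\n");
                _exit(0);
        default:
                (void) sleep(1);                /* let the child reach lockf() */
                (void) lockf(fd, F_ULOCK, 0);   /* pass the batten */
                (void) wait(NULL);
        }

        return (0);
}
#endif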

/*
 * API-specific code ENDS here
 */

int
benchmark_initbatch(void *tsd)
{
        tsd_t   *ts = (tsd_t *)tsd;
        int     e = 0;

        if (ts->ts_once == 0) {
                int     us, them;

                us = (getpindex() * lm_optT) + gettindex();
                them = (us + 1) % (lm_optP * lm_optT);
                ts->ts_id = us;

                /* lock index assignment for us and them */
                ts->ts_us0 = (us * 2);
                ts->ts_us1 = (us * 2) + 1;

                if (us < nthreads - 1) {
                        /* straight-thru connection to them */
                        ts->ts_them0 = (them * 2);
                        ts->ts_them1 = (them * 2) + 1;
                } else {
                        /* cross-over connection to them */
                        ts->ts_them0 = (them * 2) + 1;
                        ts->ts_them1 = (them * 2);
                }

                ts->ts_once = 1;
        }

        /* block their first move */
        e += block(ts->ts_them0);

        return (e);
}

int
benchmark(void *tsd, result_t *res)
{
        tsd_t   *ts = (tsd_t *)tsd;
        int     i;
        int     e = 0;

        /* wait to be unblocked (id == 0 will not block) */
        e += block(ts->ts_us0);

        for (i = 0; i < lm_optB; i += 2) {
                /* allow them to block us again */
                e += unblock(ts->ts_us0);

                /* block their next + 1 move */
                e += block(ts->ts_them1);

                /* unblock their next move */
                e += unblock(ts->ts_them0);

                /* wait for them to unblock us */
                e += block(ts->ts_us1);

                /* repeat with locks reversed */
                e += unblock(ts->ts_us1);
                e += block(ts->ts_them0);
                e += unblock(ts->ts_them1);
                e += block(ts->ts_us0);
        }

        /* finish batch with nothing blocked */
        e += unblock(ts->ts_them0);
        e += unblock(ts->ts_us0);

        res->re_count = i;
        res->re_errors = e;

        return (0);
}
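
/*
 * Example invocation (illustrative; -P and -T are the generic libMicro
 * process/thread options mentioned in the header comment, and -d is the
 * option defined above):
 *
 *      ./cascade_lockf -P 4 -d /var/tmp
 *
 * runs the cascade across four single-threaded processes using lock
 * files created in /var/tmp.
 */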