// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire, for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; it also provides a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 */

#include <linux/refcount.h>
#include <linux/bug.h>
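
/*
 * A hypothetical usage sketch (struct gadget and its helpers are illustrative
 * names, not part of the refcount API): a refcounted object typically embeds
 * a refcount_t and starts life with the count set to 1 for the creating
 * reference:
 *
 *	struct gadget {
 *		refcount_t		ref;
 *		struct list_head	node;
 *	};
 *
 *	struct gadget *gadget_create(void)
 *	{
 *		struct gadget *g = kzalloc(sizeof(*g), GFP_KERNEL);
 *
 *		if (g)
 *			refcount_set(&g->ref, 1);
 *		return g;
 *	}
 *
 * Per the note above, the allocator is what orders this initialization
 * against a later free() of the same memory.
 */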

#ifdef CONFIG_REFCOUNT_FULL

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero);
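
/*
 * Hypothetical sketch (gadget_grab_many() is an illustrative name): handing
 * out @nr references in one go to a batch of new users, but only if the
 * object is still alive:
 *
 *	static bool gadget_grab_many(struct gadget *g, unsigned int nr)
 *	{
 *		return refcount_add_not_zero(nr, &g->ref);
 *	}
 */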

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add);
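
/*
 * Hypothetical sketch (names illustrative): a caller that already holds a
 * reference handing one reference to each of @nr new users in a single
 * operation:
 *
 *	refcount_add(nr, &g->ref);
 *	for (i = 0; i < nr; i++)
 *		users[i]->gadget = g;
 */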

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero);
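
/*
 * Hypothetical sketch of the lockless-lookup case described in the comment at
 * the top of this file (gadget_lookup(), gadget_list and the members used are
 * illustrative names):
 *
 *	struct gadget *gadget_lookup(int id)
 *	{
 *		struct gadget *g;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(g, &gadget_list, node) {
 *			if (g->id == id && refcount_inc_not_zero(&g->ref)) {
 *				rcu_read_unlock();
 *				return g;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 *
 * The control dependency provided by refcount_inc_not_zero() ensures the
 * caller only stores to *g after a reference was in fact acquired.
 */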

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc);
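
/*
 * Hypothetical sketch (names illustrative): a plain get helper, valid only on
 * paths where the caller is already guaranteed to hold a reference, so the
 * count cannot be observed at 0:
 *
 *	static struct gadget *gadget_get(struct gadget *g)
 *	{
 *		refcount_inc(&g->ref);
 *		return g;
 *	}
 */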

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test);
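
/*
 * Hypothetical sketch (names illustrative): dropping, in one go, the @nr
 * references handed out by a gadget_grab_many()-style batch get, and freeing
 * the object if they were the last ones:
 *
 *	static void gadget_put_many(struct gadget *g, unsigned int nr)
 *	{
 *		if (refcount_sub_and_test(nr, &g->ref))
 *			kfree(g);
 *	}
 */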

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test);
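
/*
 * Hypothetical sketch (names illustrative): the usual put path; the release
 * ordering guarantees all prior stores to the object are visible before it
 * is freed:
 *
 *	static void gadget_put(struct gadget *g)
 *	{
 *		if (refcount_dec_and_test(&g->ref))
 *			kfree(g);
 *	}
 */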

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);
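
/*
 * Hypothetical sketch (names illustrative): dropping a short-lived extra
 * reference on a path where a longer-lived reference (here the one held by
 * the parent structure) is known to still exist, so this decrement can never
 * be the final one:
 *
 *	refcount_inc(&child->ref);
 *	do_something(child);
 *	refcount_dec(&child->ref);
 */
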
#endif /* CONFIG_REFCOUNT_FULL */

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * and not cmpxchg in generic, because that would allow implementing unsafe
 * operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
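
/*
 * Hypothetical sketch (names illustrative): a try-delete operation that only
 * succeeds when the caller holds the sole remaining reference:
 *
 *	static bool gadget_try_delete(struct gadget *g)
 *	{
 *		if (!refcount_dec_if_one(&g->ref))
 *			return false;
 *
 *		kfree(g);
 *		return true;
 *	}
 */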

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
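
/*
 * Hypothetical sketch (names illustrative; gadget_rwsem is a made-up
 * rw_semaphore protecting the list): the same fast-path/slow-path split used
 * by the *_dec_and_lock() helpers below, open-coded for a lock type they do
 * not cover:
 *
 *	static void gadget_put_locked(struct gadget *g)
 *	{
 *		if (refcount_dec_not_one(&g->ref))
 *			return;
 *
 *		down_write(&gadget_rwsem);
 *		if (!refcount_dec_and_test(&g->ref)) {
 *			up_write(&gadget_rwsem);
 *			return;
 *		}
 *		list_del(&g->node);
 *		up_write(&gadget_rwsem);
 *		kfree(g);
 *	}
 */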

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
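
/*
 * Hypothetical sketch (names illustrative): a put path that must unlink the
 * object from a mutex-protected list exactly when the last reference goes
 * away:
 *
 *	static void gadget_put_and_unlink(struct gadget *g)
 *	{
 *		if (refcount_dec_and_mutex_lock(&g->ref, &gadget_mutex)) {
 *			list_del(&g->node);
 *			mutex_unlock(&gadget_mutex);
 *			kfree(g);
 *		}
 *	}
 */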

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
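
/*
 * Hypothetical sketch (names illustrative; g->hnode and g->rcu are assumed
 * hlist_node and rcu_head members): as above, but for an object hashed under
 * a spinlock; kfree_rcu() is used because lookups may still be running under
 * RCU:
 *
 *	static void gadget_put_hashed(struct gadget *g)
 *	{
 *		if (refcount_dec_and_lock(&g->ref, &gadget_hash_lock)) {
 *			hash_del_rcu(&g->hnode);
 *			spin_unlock(&gadget_hash_lock);
 *			kfree_rcu(g, rcu);
 *		}
 *	}
 */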