Training courses

Kernel and Embedded Linux

Bootlin training courses

Embedded Linux, kernel,
Yocto Project, Buildroot, real-time,
graphics, boot time, debugging...

Bootlin logo

Elixir Cross Referencer

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/*	$NetBSD: subr_cprng.c,v 1.30.2.4 2020/05/18 18:57:31 martin Exp $ */

/*-
 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Thor Lancelot Simon and Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.30.2.4 2020/05/18 18:57:31 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cprng.h>
#include <sys/errno.h>
#include <sys/event.h>		/* XXX struct knote */
#include <sys/fcntl.h>		/* XXX FNONBLOCK */
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/rndsink.h>

#include <crypto/nist_hash_drbg/nist_hash_drbg.h>

#if defined(__HAVE_CPU_COUNTER)
#include <machine/cpu_counter.h>
#endif

/* sysctl handlers for the legacy kern.urandom and kern.arandom nodes.  */
static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

/* Internal operations on a strong CPRNG; all require cs_lock held.  */
static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);

/* Entropy-delivery callback registered with each cprng's rndsink.  */
static rndsink_callback_t	cprng_strong_rndsink_callback;

/*
 * One-time subsystem initialization: self-test the NIST Hash_DRBG and
 * register the legacy kern.urandom / kern.arandom sysctl nodes.
 */
void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	/* Refuse to run at all if the DRBG cannot pass its self-test.  */
	if (nist_hash_drbg_initialize() != 0)
		panic("NIST Hash_DRBG failed self-test");

	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "urandom",
	    SYSCTL_DESCR("Random integer value"),
	    sysctl_kern_urnd, 0, NULL, 0,
	    CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "arandom",
	    SYSCTL_DESCR("n bytes of random data"),
	    sysctl_kern_arnd, 0, NULL, 0,
	    CTL_KERN, KERN_ARND, CTL_EOL);
}

/*
 * Return a cheap, monotonically-ish increasing 32-bit value used as
 * additional DRBG input: the CPU cycle counter when available,
 * otherwise microtime, otherwise (before the clock runs) a plain
 * static counter.
 */
static inline uint32_t
cprng_counter(void)
{

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		/* microtime unsafe if clock not running yet */
		static int ctr;
		return ctr++;
	}

	struct timeval tv;
	getmicrotime(&tv);
	return tv.tv_sec * 1000000 + tv.tv_usec;
}

/*
 * State of one strong CPRNG instance.  All mutable fields below are
 * protected by cs_lock.
 */
struct cprng_strong {
	char		cs_name[16];	/* NUL-terminated diagnostic name */
	int		cs_flags;	/* CPRNG_* flags, fixed at creation */
	kmutex_t	cs_lock;	/* serializes everything below */
	percpu_t	*cs_percpu;	/* NOTE(review): unreferenced here -- confirm use */
	kcondvar_t	cs_cv;		/* waiters for full entropy */
	struct selinfo	cs_selq;	/* select/poll/kqueue waiters */
	struct rndsink	*cs_rndsink;	/* entropy source hookup */
	bool		cs_ready;	/* true iff seeded with full entropy */
	NIST_HASH_DRBG	cs_drbg;	/* the underlying generator state */

	/* XXX Kludge for /dev/random `information-theoretic' properties.   */
	unsigned int	cs_remaining;	/* CPRNG_HARD: bytes before forced reseed */
};

/*
 * Create a strong CPRNG named `name', with its lock at IPL `ipl' and
 * behavior controlled by the CPRNG_* bits in `flags'.  Sleeps for
 * memory; never returns NULL.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	memset(cprng->cs_name, 0, sizeof(cprng->cs_name));
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));

	/* CPRNG_HARD generators debit entropy: start with one seed's worth. */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	/* Warn unless the caller said partial initial entropy is OK.  */
	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}

/*
 * Tear down a strong CPRNG created by cprng_strong_create.  The caller
 * must guarantee no concurrent users and no remaining waiters.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	/* Release resources in reverse order of creation.  */
	nist_hash_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}

/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 *
 * Returns the number of bytes written to `buffer', which may be less
 * than requested (capped at CPRNG_MAX_LEN, debited for CPRNG_HARD, or
 * zero on nonblocking/interrupted waits).
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		/* Reseed with whatever entropy exists rather than wait.  */
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		/*
		 * Wait for full entropy; give up with zero bytes if the
		 * caller is nonblocking, CPRNG_USE_CV is unset, or the
		 * interruptible sleep is broken by a signal.
		 */
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			/* Exhausted the budget: truncate, demand a reseed.  */
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <=
		    NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}

/*
 * kqueue detach: unhook a knote from the CPRNG's selection queue.
 */
static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cs = kn->kn_hook;

	mutex_enter(&cs->cs_lock);
	SLIST_REMOVE(&cs->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cs->cs_lock);
}

/*
 * kqueue read event: readable iff the CPRNG currently has full
 * entropy.  When hint is NOTE_SUBMIT we are called with cs_lock
 * already held (from selnotify); otherwise take it ourselves.
 */
static int
filt_cprng_read_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cs = kn->kn_hook;
	const bool locked = (hint == NOTE_SUBMIT);
	int ready;

	if (locked)
		KASSERT(mutex_owned(&cs->cs_lock));
	else
		mutex_enter(&cs->cs_lock);

	ready = cs->cs_ready ? 1 : 0;
	if (ready)
		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large?  */

	if (locked)
		KASSERT(mutex_owned(&cs->cs_lock));
	else
		mutex_exit(&cs->cs_lock);

	return ready;
}

/*
 * kqueue write event: always reports zero bytes and no event.  Lock
 * handling mirrors filt_cprng_read_event.
 */
static int
filt_cprng_write_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cs = kn->kn_hook;
	const bool locked = (hint == NOTE_SUBMIT);

	if (locked)
		KASSERT(mutex_owned(&cs->cs_lock));
	else
		mutex_enter(&cs->cs_lock);

	kn->kn_data = 0;

	if (locked)
		KASSERT(mutex_owned(&cs->cs_lock));
	else
		mutex_exit(&cs->cs_lock);

	return 0;
}

/* Filter tables for cprng_strong_kqfilter; both detach the same way.  */
static const struct filterops cprng_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_read_event,
};

static const struct filterops cprng_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_cprng_detach,
	.f_event = filt_cprng_write_event,
};

/*
 * Attach a kqueue knote to the CPRNG.  Only EVFILT_READ and
 * EVFILT_WRITE are supported; anything else is EINVAL.
 */
int
cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
{
	const struct filterops *fops;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		fops = &cprng_read_filtops;
		break;
	case EVFILT_WRITE:
		fops = &cprng_write_filtops;
		break;
	default:
		return EINVAL;
	}

	kn->kn_fop = fops;
	kn->kn_hook = cprng;

	mutex_enter(&cprng->cs_lock);
	SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
	mutex_exit(&cprng->cs_lock);

	return 0;
}

/*
 * poll(2) backend: report readable when the CPRNG has full entropy;
 * otherwise record the caller for wakeup on reseed.  Write events are
 * never reported.
 */
int
cprng_strong_poll(struct cprng_strong *cprng, int events)
{
	const int wanted = events & (POLLIN | POLLRDNORM);
	int revents = 0;

	if (wanted == 0)
		return 0;

	mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready)
		revents = wanted;
	else
		selrecord(curlwp, &cprng->cs_selq);
	mutex_exit(&cprng->cs_lock);

	return revents;
}

/*
 * XXX Move nist_hash_drbg_reseed_advised_p and
 * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make
 * the NIST_HASH_DRBG structure opaque.
 */

/* True once the generator is halfway to its mandatory reseed point.  */
static bool
nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg)
{

	return drbg->reseed_counter > NIST_HASH_DRBG_RESEED_INTERVAL/2;
}

/* True once the generator has hit its mandatory reseed interval.  */
static bool
nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg)
{

	return drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL;
}

/*
 * Generate some data from the underlying generator.  Caller holds
 * cs_lock and guarantees bytes <= CPRNG_MAX_LEN.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST Hash_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the Hash_DRBG should not fail.
	 */
	if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}

/*
 * Reseed with whatever we can get from the system entropy pool right
 * now.  Caller holds cs_lock.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES];
	bool full_entropy;

	KASSERT(mutex_owned(&cprng->cs_lock));

	full_entropy = rndsink_request(cprng->cs_rndsink, seed, sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);

	/* Don't leave seed material lying around on the stack.  */
	explicit_memset(seed, 0, sizeof(seed));
}

/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 *
 * `bytes' must be exactly NIST_HASH_DRBG_MIN_SEEDLEN_BYTES and the
 * caller must hold cs_lock.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			/* Wake sleepers and poll/kqueue waiters.  */
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there is any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc,
		sizeof(cc)))
		/* XXX Fix nist_hash_drbg API so this can't happen.  */
		panic("cprng %s: NIST Hash_DRBG reseed failed",
		    cprng->cs_name);
}

/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cs = context;

	mutex_enter(&cs->cs_lock);
	/* Assume that rndsinks provide only full-entropy output.  */
	cprng_strong_reseed_from(cs, seed, bytes, true);
	mutex_exit(&cs->cs_lock);
}

/* Lazily-created generator backing the sysctl nodes below.  */
static ONCE_DECL(sysctl_prng_once);
static cprng_strong_t *sysctl_prng;

/*
 * RUN_ONCE initializer for sysctl_prng.  Deferred because it is too
 * early to create the generator from cprng_init().
 */
static int
makeprng(void)
{

	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}

/*
 * sysctl helper routine for kern.urandom node. Picks a random number
 * for you.
 */
static int
sysctl_kern_urnd(SYSCTLFN_ARGS)
{
	int v;
	size_t n;

	RUN_ONCE(&sysctl_prng_once, makeprng);

	/*
	 * Draw one int's worth of data.  Use a size_t for the count:
	 * cprng_strong returns size_t, and storing it in an int made
	 * the comparison below a signed/unsigned mismatch.
	 */
	n = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
	if (n != sizeof(v))
		return EIO;	/*XXX*/

	struct sysctlnode node = *rnode;
	node.sysctl_data = &v;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

/*
 * sysctl helper routine for kern.arandom node.  Fills the supplied
 * structure with random data for you.
 *
 * This node was originally declared as type "int" but its implementation
 * in OpenBSD, whence it came, would happily return up to 8K of data if
 * requested.  Evidently this was used to key RC4 in userspace.
 *
 * In NetBSD, the libc stack-smash-protection code reads 64 bytes
 * from here at every program startup.  So though it would be nice
 * to make this node return only 32 or 64 bits, we can't.  Too bad!
 */
static int
sysctl_kern_arnd(SYSCTLFN_ARGS)
{
	int error;
	void *v;
	struct sysctlnode node = *rnode;
	size_t n __diagused;	/* only consumed by the KASSERT below */

	switch (*oldlenp) {
	    case 0:
		/* Zero-length read: nothing to do, report success.  */
		return 0;
	    default:
		/* Cap each request at 256 bytes.  */
		if (*oldlenp > 256) {
			*oldlenp = 256;
		}
		RUN_ONCE(&sysctl_prng_once, makeprng);
		v = kmem_alloc(*oldlenp, KM_SLEEP);
		/* sysctl_prng is CPRNG_REKEY_ANY, so this cannot block short. */
		n = cprng_strong(sysctl_prng, v, *oldlenp, 0);
		KASSERT(n == *oldlenp);
		node.sysctl_data = v;
		node.sysctl_size = *oldlenp;
		error = sysctl_lookup(SYSCTLFN_CALL(&node));
		kmem_free(v, *oldlenp);
		return error;
	}
}