Training courses

Kernel and Embedded Linux

Bootlin training courses

Embedded Linux, kernel,
Yocto Project, Buildroot, real-time,
graphics, boot time, debugging...

Bootlin logo

Elixir Cross Referencer

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
/*
 * ntp_monitor - monitor ntpd statistics
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include <ntp_random.h>

#include <stdio.h>
#include <signal.h>
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif

/*
 * Record statistics based on source address, mode and version. The
 * receive procedure calls us with the incoming rbufp before it does
 * anything else. While at it, implement rate controls for inbound
 * traffic.
 *
 * Each entry is doubly linked into two lists, a hash table and a most-
 * recently-used (MRU) list. When a packet arrives it is looked up in
 * the hash table. If found, the statistics are updated and the entry
 * relinked at the head of the MRU list. If not found, a new entry is
 * allocated, initialized and linked into both the hash table and at the
 * head of the MRU list.
 *
 * Memory is usually allocated by grabbing a big chunk of new memory and
 * cutting it up into littler pieces. The exception to this when we hit
 * the memory limit. Then we free memory by grabbing entries off the
 * tail for the MRU list, unlinking from the hash table, and
 * reinitializing.
 *
 * INC_MONLIST is the default allocation granularity in entries.
 * INIT_MONLIST is the default initial allocation in entries.
 */
#ifdef MONMEMINC		/* old name */
# define	INC_MONLIST	MONMEMINC
#elif !defined(INC_MONLIST)
# define	INC_MONLIST	(4 * 1024 / sizeof(mon_entry))
#endif
#ifndef INIT_MONLIST
# define	INIT_MONLIST	(4 * 1024 / sizeof(mon_entry))
#endif
#ifndef MRU_MAXDEPTH_DEF
# define MRU_MAXDEPTH_DEF	(1024 * 1024 / sizeof(mon_entry))
#endif

/*
 * Hashing stuff
 */
u_char	mon_hash_bits;		/* hash width in bits; computed in
				 * mon_start(), clamped to [4, 16] */

/*
 * Pointers to the hash table and the MRU list.  Memory for the hash
 * table is allocated only if monitoring is enabled.
 */
mon_entry **	mon_hash;	/* MRU hash table */
mon_entry	mon_mru_list;	/* mru listhead */

/*
 * List of free structures, and counters of in-use and total
 * structures. The free structures are linked with the hash_next field.
 */
static  mon_entry *mon_free;		/* free list or null if none */
	u_int mru_alloc;		/* mru list + free list count */
	u_int mru_entries;		/* mru list count */
	u_int mru_peakentries;		/* highest mru_entries seen */
	u_int mru_initalloc = INIT_MONLIST;/* entries to preallocate */
	u_int mru_incalloc = INC_MONLIST;/* allocation batch factor */
static	u_int mon_mem_increments;	/* times called malloc() */

/*
 * Parameters of the RES_LIMITED restriction option. We define headway
 * as the idle time between packets. A packet is discarded if the
 * headway is less than the minimum, as well as if the average headway
 * is less than eight times the increment.
 */
int	ntp_minpkt = NTP_MINPKT;	/* minimum (log 2 s) */
u_char	ntp_minpoll = NTP_MINPOLL;	/* increment (log 2 s) */

/*
 * Initialization state.  We may be monitoring, we may not.  If
 * we aren't, we may not even have allocated any memory yet.
 */
	u_int	mon_enabled;		/* enable switch */
	u_int	mru_mindepth = 600;	/* preempt above this */
	int	mru_maxage = 64;	/* for entries older than */
	u_int	mru_maxdepth = 		/* MRU count hard limit */
			MRU_MAXDEPTH_DEF;
	int	mon_age = 3000;		/* preemption limit */

/* forward declarations of file-local helpers */
static	void		mon_getmoremem(void);
static	void		remove_from_hash(mon_entry *);
static	inline void	mon_free_entry(mon_entry *);
static	inline void	mon_reclaim_entry(mon_entry *);


/*
 * init_mon - initialize monitoring global data
 */
void
init_mon(void)
{
	/*
	 * All memory allocation is deferred to mon_start(); here we
	 * only set up the empty MRU list and the disabled state.
	 */
	INIT_DLIST(mon_mru_list, mru);
	mon_enabled = MON_OFF;
}


/*
 * remove_from_hash - unlink an entry from the address hash table and
 *		      decrement mru_entries.
 */
static void
remove_from_hash(
	mon_entry *mon
	)
{
	u_int		slot;
	mon_entry *	unlinked;

	mru_entries--;
	slot = MON_HASH(&mon->rmtadr);
	UNLINK_SLIST(unlinked, mon_hash[slot], mon, hash_next,
		     mon_entry);
	/* the entry must have been present in its bucket */
	ENSURE(unlinked == mon);
}


/*
 * mon_free_entry - scrub an entry and push it onto the free list,
 *		    which is threaded through the hash_next field.
 */
static inline void
mon_free_entry(
	mon_entry *ent
	)
{
	ZERO(*ent);
	LINK_SLIST(mon_free, ent, hash_next);
}


/*
 * mon_reclaim_entry - detach an entry from both the MRU list and the
 *		       hash table, then zero it so it can be reused.
 *
 * remove_from_hash() decrements mru_entries as a side effect; it is
 * the caller's responsibility to increment it again when the entry
 * is put back into service.
 */
static inline void
mon_reclaim_entry(
	mon_entry *ent
	)
{
	DEBUG_INSIST(NULL != ent);

	UNLINK_DLIST(ent, mru);
	remove_from_hash(ent);
	ZERO(*ent);
}


/*
 * mon_getmoremem - allocate a batch of entries and put them all on
 *		    the free list.
 */
static void
mon_getmoremem(void)
{
	mon_entry *	batch;
	u_int		count;
	u_int		i;

	/* the first call uses the initial batch size, later calls
	 * the incremental one */
	count = (0 == mon_mem_increments)
		    ? mru_initalloc
		    : mru_incalloc;

	if (count > 0) {
		batch = eallocarray(count, sizeof(*batch));
		mru_alloc += count;
		/* free from the last entry down to the first */
		for (i = count; i > 0; i--)
			mon_free_entry(&batch[i - 1]);

		mon_mem_increments++;
	}
}


/*
 * mon_start - start up the monitoring software
 */
void
mon_start(
	int mode
	)
{
	size_t octets;
	u_int min_hash_slots;

	if (MON_OFF == mode)		/* MON_OFF is 0 */
		return;
	if (mon_enabled) {
		mon_enabled |= mode;
		return;
	}
	if (0 == mon_mem_increments)
		mon_getmoremem();
	/*
	 * Select the MRU hash table size to limit the average count
	 * per bucket at capacity (mru_maxdepth) to 8, if possible
	 * given our hash is limited to 16 bits.
	 */
	min_hash_slots = (mru_maxdepth / 8) + 1;
	mon_hash_bits = 0;
	while (min_hash_slots >>= 1)
		mon_hash_bits++;
	mon_hash_bits = max(4, mon_hash_bits);
	mon_hash_bits = min(16, mon_hash_bits);
	octets = sizeof(*mon_hash) * MON_HASH_SIZE;
	mon_hash = erealloc_zero(mon_hash, octets, 0);

	mon_enabled = mode;
}


/*
 * mon_stop - stop the monitoring software
 */
void
mon_stop(
	int mode
	)
{
	mon_entry *mon;

	/* nothing to do unless a currently-enabled mode is stopped */
	if (MON_OFF == mon_enabled || MON_OFF == mode
	    || !(mon_enabled & mode))
		return;

	mon_enabled &= ~mode;
	if (MON_OFF != mon_enabled)
		return;

	/*
	 * Dump every MRU entry straight onto the free list.  There is
	 * no need to unlink each one from the MRU list or hash table
	 * individually, as both are reset wholesale below.
	 */
	ITER_DLIST_BEGIN(mon_mru_list, mon, mru, mon_entry)
		mon_free_entry(mon);
	ITER_DLIST_END()

	/* empty the MRU list and hash table. */
	mru_entries = 0;
	INIT_DLIST(mon_mru_list, mru);
	zero_mem(mon_hash, sizeof(*mon_hash) * MON_HASH_SIZE);
}


/*
 * mon_clearinterface -- remove mru entries referring to a local address
 *			 which is going away.
 */
void
mon_clearinterface(
	endpt *lcladr
	)
{
	mon_entry *mon;

	/* iterate mon over mon_mru_list; NOTE(review): this assumes
	 * ITER_DLIST_BEGIN() tolerates unlinking the current node --
	 * confirm against ntp_lists.h */
	ITER_DLIST_BEGIN(mon_mru_list, mon, mru, mon_entry)
		if (mon->lcladr == lcladr) {
			/* remove from mru list */
			UNLINK_DLIST(mon, mru);
			/* remove from hash list, adjust mru_entries */
			remove_from_hash(mon);
			/* put on free list */
			mon_free_entry(mon);
		}
	ITER_DLIST_END()
}


/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, possible KoD response.  This implies you can not tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
	struct recvbuf *rbufp,
	u_short	flags
	)
{
	l_fp		interval_fp;
	struct pkt *	pkt;
	mon_entry *	mon;
	mon_entry *	oldest;
	int		oldest_age;
	u_int		hash;
	u_short		restrict_mask;
	u_char		mode;
	u_char		version;
	int		interval;
	int		head;		/* headway increment */
	int		leak;		/* new headway */
	int		limit;		/* average threshold */

	REQUIRE(rbufp != NULL);

	/* with monitoring off there is no state to keep; douse the
	 * rate-limiting bits and return */
	if (mon_enabled == MON_OFF)
		return ~(RES_LIMITED | RES_KOD) & flags;

	/* hash on the source address; mode and version come from the
	 * packet's li_vn_mode octet */
	pkt = &rbufp->recv_pkt;
	hash = MON_HASH(&rbufp->recv_srcadr);
	mode = PKT_MODE(pkt->li_vn_mode);
	version = PKT_VERSION(pkt->li_vn_mode);
	mon = mon_hash[hash];

	/*
	 * We keep track of all traffic for a given IP in one entry,
	 * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
	 */

	for (; mon != NULL; mon = mon->hash_next)
		if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr))
			break;

	if (mon != NULL) {
		/*
		 * Known source: compute the headway -- whole seconds
		 * since the previous packet from this address.
		 */
		interval_fp = rbufp->recv_time;
		L_SUB(&interval_fp, &mon->last);
		/* add one-half second to round up */
		L_ADDUF(&interval_fp, 0x80000000);
		interval = interval_fp.l_i;
		mon->last = rbufp->recv_time;
		/* remember the most recent source port seen */
		NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
		mon->count++;
		restrict_mask = flags;
		mon->vn_mode = VN_MODE(version, mode);

		/* Shuffle to the head of the MRU list. */
		UNLINK_DLIST(mon, mru);
		LINK_DLIST(mon_mru_list, mon, mru);

		/*
		 * At this point the most recent arrival is first in the
		 * MRU list.  Decrease the counter by the headway, but
		 * not less than zero.
		 */
		mon->leak -= interval;
		mon->leak = max(0, mon->leak);
		head = 1 << ntp_minpoll;
		leak = mon->leak + head;
		limit = NTP_SHIFT * head;

		DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
			    interval, leak, limit));

		/*
		 * If the minimum and average thresholds are not
		 * exceeded, douse the RES_LIMITED and RES_KOD bits and
		 * increase the counter by the headway increment.  Note
		 * that we give a 1-s grace for the minimum threshold
		 * and a 2-s grace for the headway increment.  If one or
		 * both thresholds are exceeded and the old counter is
		 * less than the average threshold, set the counter to
		 * the average threshold plus the increment and leave
		 * the RES_LIMITED and RES_KOD bits lit. Otherwise,
		 * leave the counter alone and douse the RES_KOD bit.
		 * This rate-limits the KoDs to no less than the average
		 * headway.
		 */
		if (interval + 1 >= ntp_minpkt && leak < limit) {
			mon->leak = leak - 2;
			restrict_mask &= ~(RES_LIMITED | RES_KOD);
		} else if (mon->leak < limit)
			mon->leak = limit + head;
		else
			restrict_mask &= ~RES_KOD;

		mon->flags = restrict_mask;

		return mon->flags;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 *
	 * The following ntp.conf "mru" knobs come into play determining
	 * the depth (or count) of the MRU list:
	 * - mru_mindepth ("mru mindepth") is a floor beneath which
	 *   entries are kept without regard to their age.  The
	 *   default is 600 which matches the longtime implementation
	 *   limit on the total number of entries.
	 * - mru_maxage ("mru maxage") is a ceiling on the age in
	 *   seconds of entries.  Entries older than this are
	 *   reclaimed once mon_mindepth is exceeded.  64s default.
	 *   Note that entries older than this can easily survive
	 *   as they are reclaimed only as needed.
	 * - mru_maxdepth ("mru maxdepth") is a hard limit on the
	 *   number of entries.
	 * - "mru maxmem" sets mru_maxdepth to the number of entries
	 *   which fit in the given number of kilobytes.  The default is
	 *   1024, or 1 megabyte.
	 * - mru_initalloc ("mru initalloc" sets the count of the
	 *   initial allocation of MRU entries.
	 * - "mru initmem" sets mru_initalloc in units of kilobytes.
	 *   The default is 4.
	 * - mru_incalloc ("mru incalloc" sets the number of entries to
	 *   allocate on-demand each time the free list is empty.
	 * - "mru incmem" sets mru_incalloc in units of kilobytes.
	 *   The default is 4.
	 * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
	 * ntp.conf controls.  Similarly for "mru initalloc" and "mru
	 * initmem", and for "mru incalloc" and "mru incmem".
	 */
	if (mru_entries < mru_mindepth) {
		/* below the floor: always take from the free list,
		 * replenishing it if necessary */
		if (NULL == mon_free)
			mon_getmoremem();
		UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
	} else {
		/* age of the least-recently-used entry, rounded to
		 * whole seconds as for interval above */
		oldest = TAIL_DLIST(mon_mru_list, mru);
		oldest_age = 0;		/* silence uninit warning */
		if (oldest != NULL) {
			interval_fp = rbufp->recv_time;
			L_SUB(&interval_fp, &oldest->last);
			/* add one-half second to round up */
			L_ADDUF(&interval_fp, 0x80000000);
			oldest_age = interval_fp.l_i;
		}
		/* note -1 is legal for mru_maxage (disables) */
		if (oldest != NULL && mru_maxage < oldest_age) {
			mon_reclaim_entry(oldest);
			mon = oldest;
		} else if (mon_free != NULL || mru_alloc <
			   mru_maxdepth) {
			if (NULL == mon_free)
				mon_getmoremem();
			UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
		/* Preempt from the MRU list if old enough. */
		} else if (ntp_random() / (2. * FRAC) >
			   (double)oldest_age / mon_age) {
			/* probabilistic refusal: drop this packet's
			 * stats rather than preempt a young entry */
			return ~(RES_LIMITED | RES_KOD) & flags;
		} else {
			mon_reclaim_entry(oldest);
			mon = oldest;
		}
	}

	INSIST(mon != NULL);

	/*
	 * Got one, initialize it
	 */
	mru_entries++;
	mru_peakentries = max(mru_peakentries, mru_entries);
	mon->last = rbufp->recv_time;
	mon->first = mon->last;
	mon->count = 1;
	mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
	mon->leak = 0;
	memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
	mon->vn_mode = VN_MODE(version, mode);
	mon->lcladr = rbufp->dstadr;
	/* classify arrival: multicast if the local endpoint is
	 * multicast-open and the packet came in on its main fd,
	 * broadcast if on the bcast fd, otherwise unicast */
	mon->cast_flags = (u_char)(((rbufp->dstadr->flags &
	    INT_MCASTOPEN) && rbufp->fd == mon->lcladr->fd) ? MDF_MCAST
	    : rbufp->fd == mon->lcladr->bfd ? MDF_BCAST : MDF_UCAST);

	/*
	 * Drop him into front of the hash table. Also put him on top of
	 * the MRU list.
	 */
	LINK_SLIST(mon_hash[hash], mon, hash_next);
	LINK_DLIST(mon_mru_list, mon, mru);

	return mon->flags;
}