/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#ifdef USE_SNPRINTB
# include <util.h>
#endif
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"
#include "timexsup.h"

#include <limits.h>
#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive-parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	300.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define	CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain (log2) */
#define CLOCK_AVG	8.	/* parameter averaging constant */
#define CLOCK_FLL	.25	/* FLL loop gain */
#define	CLOCK_FLOOR	.0005	/* startup offset floor (s) */
#define	CLOCK_ALLAN	11	/* Allan intercept (log2 s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */
#define	FREQTOD(x)	((x) / 65536e6) /* NTP to double */
#define	DTOFREQ(x)	((int32)((x) * 65536e6)) /* double to NTP */
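
/*
 * The kernel represents frequency as PPM with a 16-bit fraction
 * (units of 2^-16 PPM), hence the 65536e6 scale factor above. For
 * example, a drift of +10 PPM is 10e-6 s/s, so DTOFREQ(10e-6) =
 * 10e-6 * 65536e6 = 655360, and FREQTOD(655360) = 10e-6 s/s again.
 */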

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< step		> step		Comments
 *	========================================================
 *	NSET	FREQ		step, FREQ	freq not set
 *
 *	FSET	SYNC		step, SYNC	freq set
 *
 *	FREQ	if (mu < 900)	if (mu < 900)	set freq direct
 *		    ignore	    ignore
 *		else		else
 *		    freq, SYNC	    freq, step, SYNC
 *
 *	SYNC	SYNC		SPIK, ignore	adjust phase/freq
 *
 *	SPIK	SYNC		if (mu < 900)	adjust phase/freq
 *				    ignore
 *				step, SYNC
 */
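/*
 * Notation above: "< step" and "> step" compare the offset magnitude
 * against the step threshold (clock_max_fwd/clock_max_back), and the
 * "mu < 900" tests correspond to the stepout threshold checked as
 * clock_minstep in the code below (default CLOCK_MINSTEP, 300 s).
 */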
/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the documentation.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * pll_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both pll_control and kern_enable are set, the kernel
 * support is used as described above; if not, the kernel is bypassed
 * entirely and the daemon discipline is used instead.
 *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel), now in Solaris, disciplines the clock in
 * microseconds. The second and third (nanokernel) discipline the
 * clock in nanoseconds. These versions are identified if the symbol
 * STA_PLL is present in the
 * header file /usr/include/sys/timex.h. The third and current version
 * includes TAI offset and is identified by the symbol NTP_API with
 * value 4.
 *
 * Each PPS time/frequency discipline can be enabled by the atom driver
 * or another driver. If enabled, the STA_PPSTIME and STA_FREQ bits are
 * set in the kernel status word; otherwise, these bits are cleared.
 * These bits are also cleared if the kernel reports an error.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock. Unless specified otherwise, all times are in seconds.
 */
/*
 * Program variables that can be tinkered.
 */
double	clock_max_back = CLOCK_MAX;	/* step threshold */
double	clock_max_fwd =  CLOCK_MAX;	/* step threshold */
double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold */
double	clock_panic = CLOCK_PANIC; /* panic threshold */
double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
u_char	allan_xpt = CLOCK_ALLAN; /* Allan intercept (log2 s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset */
double	clock_jitter;		/* offset jitter */
double	drift_comp;		/* frequency (s/s) */
static double init_drift_comp; /* initial frequency (PPM) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
double	clock_codec;		/* audio codec frequency (samples/s) */
static u_long clock_epoch;	/* last update */
u_int	sys_tai;		/* TAI offset from UTC */
static int loop_started;	/* TRUE after LOOP_DRIFTINIT */
static void rstclock (int, double); /* transition function */
static double direct_freq(double); /* direct set frequency */
static void set_freq(double);	/* set frequency */
#ifndef PATH_MAX
# define PATH_MAX MAX_PATH
#endif
static char relative_path[PATH_MAX + 1]; /* relative path per recursive make */
static char *this_file = NULL;

#ifdef KERNEL_PLL
static struct timex ntv;	/* ntp_adjtime() parameters */
int	pll_status;		/* last kernel status bits */
#if defined(STA_NANO) && NTP_API == 4
static u_int loop_tai;		/* last TAI offset */
#endif /* STA_NANO */
static	void	start_kern_loop(void);
static	void	stop_kern_loop(void);
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable = TRUE;	/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable = TRUE;	/* kernel support enabled */
int	hardpps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	kernel_status;		/* from ntp_adjtime */
int	force_step_once = FALSE; /* always step time once at startup (-G) */
int	mode_ntpdate = FALSE;	/* exit on first clock set (-q) */
int	freq_cnt;		/* initial frequency clamp */
int	freq_set;		/* initial set frequency switch */

/*
 * Clock state machine variables
 */
int	state = 0;		/* clock discipline state */
u_char	sys_poll;		/* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */

u_int	tc_twinlo;		/* TC step down not before this time */
u_int	tc_twinhi;		/* TC step up not before this time */ 

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static void pll_trap (int);	/* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

static void
sync_status(const char *what, int ostatus, int nstatus)
{
	char obuf[256], nbuf[256], tbuf[1024];
#if defined(USE_SNPRINTB) && defined (STA_FMT)
	snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
	snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
#else
	snprintf(obuf, sizeof(obuf), "%04x", ostatus);
	snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
#endif
	snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
	report_event(EVNT_KERN, NULL, tbuf);
}

/*
 * file_name - return pointer to non-relative portion of this C file pathname
 */
static char *file_name(void)
{
	if (this_file == NULL) {
	    (void)strncpy(relative_path, __FILE__, PATH_MAX);
	    for (this_file=relative_path;
		*this_file && ! isalnum((unsigned char)*this_file);
		this_file++) ;
	}
	return this_file;
}

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
	/*
	 * Initialize state variables.
	 */
	sys_poll = ntp_minpoll;
	clock_jitter = LOGTOD(sys_precision);
	freq_cnt = (int)clock_minstep;
}

#ifdef KERNEL_PLL
/*
 * ntp_adjtime_error_handler - process errors from ntp_adjtime
 */
static void
ntp_adjtime_error_handler(
	const char *caller,	/* name of calling function */
	struct timex *ptimex,	/* pointer to struct timex */
	int ret,		/* return value from ntp_adjtime */
	int saved_errno,	/* value of errno when ntp_adjtime returned */
	int pps_call,		/* ntp_adjtime call was PPS-related */
	int tai_call,		/* ntp_adjtime call was TAI-related */
	int line		/* line number of ntp_adjtime call */
	)
{
	char des[1024] = "";	/* Decoded Error Status */
	char *dbp, *ebp;

	dbp = des;
	ebp = dbp + sizeof(des);
	
	switch (ret) {
	    case -1:
		switch (saved_errno) {
		    case EFAULT:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex pointer: 0x%lx",
			    caller, file_name(), line,
			    (long)((void *)ptimex)
			);
		    break;
		    case EINVAL:
			msyslog(LOG_ERR, "%s: %s line %d: invalid struct timex \"constant\" element value: %ld",
			    caller, file_name(), line,
			    (long)(ptimex->constant)
			);
		    break;
		    case EPERM:
			if (tai_call) {
			    errno = saved_errno;
			    msyslog(LOG_ERR,
				"%s: ntp_adjtime(TAI) failed: %m",
				caller);
			}
			errno = saved_errno;
			msyslog(LOG_ERR, "%s: %s line %d: ntp_adjtime: %m",
			    caller, file_name(), line
			);
		    break;
		    default:
			msyslog(LOG_NOTICE, "%s: %s line %d: unhandled errno value %d after failed ntp_adjtime call",
			    caller, file_name(), line,
			    saved_errno
			);
		    break;
		}
	    break;
#ifdef TIME_OK
	    case TIME_OK: /* 0: synchronized, no leap second warning */
		/* msyslog(LOG_INFO, "kernel reports time is synchronized normally"); */
	    break;
#else
# warning TIME_OK is not defined
#endif
#ifdef TIME_INS
	    case TIME_INS: /* 1: positive leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second insertion scheduled");
	    break;
#else
# warning TIME_INS is not defined
#endif
#ifdef TIME_DEL
	    case TIME_DEL: /* 2: negative leap second warning */
		msyslog(LOG_INFO, "kernel reports leap second deletion scheduled");
	    break;
#else
# warning TIME_DEL is not defined
#endif
#ifdef TIME_OOP
	    case TIME_OOP: /* 3: leap second in progress */
		msyslog(LOG_INFO, "kernel reports leap second in progress");
	    break;
#else
# warning TIME_OOP is not defined
#endif
#ifdef TIME_WAIT
	    case TIME_WAIT: /* 4: leap second has occurred */
		msyslog(LOG_INFO, "kernel reports leap second has occurred");
	    break;
#else
# warning TIME_WAIT is not defined
#endif
#ifdef TIME_ERROR
#if 0

from the reference implementation of ntp_gettime():

		// Hardware or software error
        if ((time_status & (STA_UNSYNC | STA_CLOCKERR))

	/*
         * PPS signal lost when either time or frequency synchronization
         * requested
         */
	|| (time_status & (STA_PPSFREQ | STA_PPSTIME)
	    && !(time_status & STA_PPSSIGNAL))

        /*
         * PPS jitter exceeded when time synchronization requested
         */
	|| (time_status & STA_PPSTIME &&
            time_status & STA_PPSJITTER)

        /*
         * PPS wander exceeded or calibration error when frequency
         * synchronization requested
         */
	|| (time_status & STA_PPSFREQ &&
            time_status & (STA_PPSWANDER | STA_PPSERROR)))
                return (TIME_ERROR);

or, from ntp_adjtime():

	if (  (time_status & (STA_UNSYNC | STA_CLOCKERR))
	    || (time_status & (STA_PPSFREQ | STA_PPSTIME)
		&& !(time_status & STA_PPSSIGNAL)) 
	    || (time_status & STA_PPSTIME
		&& time_status & STA_PPSJITTER)
	    || (time_status & STA_PPSFREQ
		&& time_status & (STA_PPSWANDER | STA_PPSERROR))
	   )
		return (TIME_ERROR);
#endif

	    case TIME_ERROR: /* 5: unsynchronized, or loss of synchronization */
				/* error (see status word) */

		if (ptimex->status & STA_UNSYNC)
			xsbprintf(&dbp, ebp, "%sClock Unsynchronized",
				 (*des) ? "; " : "");

		if (ptimex->status & STA_CLOCKERR)
		    xsbprintf(&dbp, ebp, "%sClock Error",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSFREQ)
		    xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but no PPS",
			      (*des) ? "; " : "");

		if (!(ptimex->status & STA_PPSSIGNAL)
		    && ptimex->status & STA_PPSTIME)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but no PPS signal",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSTIME
		    && ptimex->status & STA_PPSJITTER)
			xsbprintf(&dbp, ebp, "%sPPS Time Sync wanted but PPS Jitter exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSWANDER)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but PPS Wander exceeded",
				  (*des) ? "; " : "");

		if (   ptimex->status & STA_PPSFREQ
		    && ptimex->status & STA_PPSERROR)
			xsbprintf(&dbp, ebp, "%sPPS Frequency Sync wanted but Calibration error detected",
				  (*des) ? "; " : "");

		if (pps_call && !(ptimex->status & STA_PPSSIGNAL))
			report_event(EVNT_KERN, NULL,
			    "no PPS signal");
		DPRINTF(1, ("kernel loop status %#x (%s)\n",
			ptimex->status, des));
		/*
		 * This code may be returned when ntp_adjtime() has just
		 * been called for the first time, quite a while after
		 * startup, when ntpd just starts to discipline the kernel
		 * time. In this case the occurrence of this message
		 * can be pretty confusing.
		 *
		 * HMS: How about a message when we begin kernel processing:
		 *    Determining kernel clock state...
		 * so an initial TIME_ERROR message is less confusing,
		 * or skipping the first message (ugh),
		 * or ???
		 * msyslog(LOG_INFO, "kernel reports time synchronization lost");
		 */
		msyslog(LOG_INFO, "kernel reports TIME_ERROR: %#x: %s",
			ptimex->status, des);
	    break;
#else
# warning TIME_ERROR is not defined
#endif
	    default:
		msyslog(LOG_NOTICE, "%s: %s line %d: unhandled return value %d from ntp_adjtime() in %s at line %d",
		    caller, file_name(), line,
		    ret,
		    __func__, __LINE__
		);
	    break;
	}
	return;
}
#endif

/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1	update ignored: exceeds panic threshold
 * 0	update ignored: popcorn or exceeds step threshold
 * 1	clock was slewed
 * 2	clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
	struct	peer *peer,	/* synch source peer structure */
	double	fp_offset	/* clock offset (s) */
	)
{
	int	rval;		/* return code */
	int	osys_poll;	/* old system poll */
	int	ntp_adj_ret;	/* returned by ntp_adjtime */
	double	mu;		/* interval since last update */
	double	clock_frequency; /* clock frequency */
	double	dtemp, etemp;	/* double temps */
	char	tbuf[80];	/* report buffer */

	(void)ntp_adj_ret; /* not always used below... */
	/*
	 * If the loop is opened or the NIST LOCKCLOCK is in use,
	 * monitor and record the offsets anyway in order to determine
	 * the open-loop response and then go home.
	 */
#ifndef LOCKCLOCK
	if (!ntp_enable)
#endif /* not LOCKCLOCK */
	{
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		return (0);
	}

#ifndef LOCKCLOCK
	/*
	 * If the clock is way off, panic is declared. The clock_panic
	 * defaults to 1000 s; if set to zero, the panic will never
	 * occur. The allow_panic defaults to FALSE, so the first panic
	 * will exit. It can be set TRUE by a command line option, in
	 * which case the clock will be set anyway and time marches on.
	 * But, allow_panic will be set FALSE when the update is less
	 * than the step threshold; so, subsequent panics will exit.
	 */
	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
	    !allow_panic) {
		snprintf(tbuf, sizeof(tbuf),
		    "%+.0f s; set clock manually within %.0f s.",
		    fp_offset, clock_panic);
		report_event(EVNT_SYSFAULT, NULL, tbuf);
		return (-1);
	}

	allow_panic = FALSE;

	/*
	 * This section simulates ntpdate. If the offset exceeds the
	 * step threshold (128 ms), step the clock to that time and
	 * exit. Otherwise, slew the clock to that time and exit. Note
	 * that the slew will persist and eventually complete beyond the
	 * life of this program. Note that while ntpdate is active, the
	 * terminal does not detach, so the termination message prints
	 * directly to the terminal.
	 */
	if (mode_ntpdate) {
		if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
		   || (-fp_offset > clock_max_back && clock_max_back > 0)) {
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
			    fp_offset);
			printf("ntpd: time set %+.6fs\n", fp_offset);
		} else {
			adj_systime(fp_offset);
			msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
			    fp_offset);
			printf("ntpd: time slew %+.6fs\n", fp_offset);
		}
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		exit (0);
	}

	/*
	 * The huff-n'-puff filter finds the lowest delay in the recent
	 * interval. This is used to correct the offset by one-half the
	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric,
	 * clockhopping is avoided, and the clock frequency wander is
	 * relatively small.
	 */
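	/*
	 * For illustration: with a filter minimum of, say, 20 ms and a
	 * sample delay of 120 ms, half the difference is 50 ms, so a
	 * positive offset is reduced by 50 ms and a negative offset is
	 * increased by 50 ms before further processing.
	 */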
	if (sys_huffpuff != NULL) {
		if (peer->delay < sys_huffpuff[sys_huffptr])
			sys_huffpuff[sys_huffptr] = peer->delay;
		if (peer->delay < sys_mindly)
			sys_mindly = peer->delay;
		if (fp_offset > 0)
			dtemp = -(peer->delay - sys_mindly) / 2;
		else
			dtemp = (peer->delay - sys_mindly) / 2;
		fp_offset += dtemp;
		DPRINTF(1, ("local_clock: size %d mindly %.6f huffpuff %.6f\n",
			    sys_hufflen, sys_mindly, dtemp));
	}

	/*
	 * Clock state machine transition function which defines how the
	 * system reacts to large phase and frequency excursion. There
	 * are two main regimes: when the offset exceeds the step
	 * threshold (128 ms) and when it does not. Under certain
	 * conditions updates are suspended until the stepout threshold
	 * (clock_minstep, default 300 s) is exceeded. See the
	 * documentation on how these thresholds interact with commands
	 * and command line options.
	 *
	 * Note the kernel is disabled if step is disabled or greater
	 * than 0.5 s or in ntpdate mode.
	 */
	osys_poll = sys_poll;
	if (sys_poll < peer->minpoll)
		sys_poll = peer->minpoll;
	if (sys_poll > peer->maxpoll)
		sys_poll = peer->maxpoll;
	mu = current_time - clock_epoch;
	clock_frequency = drift_comp;
	rval = 1;
	if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
	   || (-fp_offset > clock_max_back && clock_max_back > 0)
	   || force_step_once ) {
		if (force_step_once) {
			force_step_once = FALSE;  /* we want this only once after startup */
			msyslog(LOG_NOTICE, "Doing initial time step");
		}

		switch (state) {

		/*
		 * In SYNC state we ignore the first outlier and switch
		 * to SPIK state.
		 */
		case EVNT_SYNC:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_SPIK, NULL, tbuf);
			state = EVNT_SPIK;
			return (0);

		/*
		 * In FREQ state we ignore outliers and inliers. At the
		 * first outlier after the stepout threshold, compute
		 * the apparent frequency correction and step the phase.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);

			/* fall through to EVNT_SPIK */

		/*
		 * In SPIK state we ignore succeeding outliers until
		 * either an inlier is found or the stepout threshold is
		 * exceeded.
		 */
		case EVNT_SPIK:
			if (mu < clock_minstep)
				return (0);

			/* fall through to default */

		/*
		 * We get here by default in NSET and FSET states and
		 * from above in FREQ or SPIK states.
		 *
		 * In NSET state an initial frequency correction is not
		 * available, usually because the frequency file has not
		 * yet been written. Since the time is outside the step
		 * threshold, the clock is stepped. The frequency will
		 * be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set from
		 * the frequency file. Since the time is outside the
		 * step threshold, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 15 minutes to set the clock for
		 * the first time.
		 *
		 * In FREQ and SPIK states the stepout threshold has
		 * expired and the phase is still above the step
		 * threshold. Note that a single spike greater than the
		 * step threshold is always suppressed, even with a
		 * long time constant.
		 */
		default:
			snprintf(tbuf, sizeof(tbuf), "%+.6f s",
			    fp_offset);
			report_event(EVNT_CLOCKRESET, NULL, tbuf);
			step_systime(fp_offset);
			reinit_timer();
			tc_counter = 0;
			clock_jitter = LOGTOD(sys_precision);
			rval = 2;
			if (state == EVNT_NSET) {
				rstclock(EVNT_FREQ, 0);
				return (rval);
			}
			break;
		}
		rstclock(EVNT_SYNC, 0);
	} else {
		/*
		 * The offset is less than the step threshold. Calculate
		 * the jitter as the exponentially weighted offset
		 * differences.
		 */
		etemp = SQUARE(clock_jitter);
		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
		    LOGTOD(sys_precision)));
		clock_jitter = SQRT(etemp + (dtemp - etemp) /
		    CLOCK_AVG);
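		/*
		 * In other words, jitter^2 is an exponential average of
		 * the squared offset differences with weight 1/CLOCK_AVG:
		 * jitter^2 <- jitter^2 + (diff^2 - jitter^2) / 8, where
		 * diff is floored at the system precision.
		 */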
		switch (state) {

		/*
		 * In NSET state this is the first update received and
		 * the frequency has not been initialized. Adjust the
		 * phase, but do not adjust the frequency until after
		 * the stepout threshold.
		 */
		case EVNT_NSET:
			adj_systime(fp_offset);
			rstclock(EVNT_FREQ, fp_offset);
			break;

		/*
		 * In FREQ state ignore updates until the stepout
		 * threshold. After that, compute the new frequency, but
		 * do not adjust the frequency until the holdoff counter
		 * decrements to zero.
		 */
		case EVNT_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = direct_freq(fp_offset);
			/* fall through */

		/*
		 * We get here by default in FSET, SPIK and SYNC states.
		 * Here compute the frequency update due to PLL and FLL
		 * contributions. Note, we avoid frequency discipline at
		 * startup until the initial transient has subsided.
		 */
		default:
			if (freq_cnt == 0) {

				/*
				 * The FLL and PLL frequency gain constants
				 * depend on the time constant and Allan
				 * intercept. The PLL is always used, but
				 * becomes ineffective above the Allan intercept
				 * where the FLL becomes effective.
				 */
				if (sys_poll >= allan_xpt)
					clock_frequency +=
					      (fp_offset - clock_offset)
					    / ( max(ULOGTOD(sys_poll), mu)
					       * CLOCK_FLL);

				/*
				 * The PLL frequency gain (numerator) depends on
				 * the minimum of the update interval and Allan
				 * intercept. This reduces the PLL gain when the
				 * FLL becomes effective.
				 */
				etemp = min(ULOGTOD(allan_xpt), mu);
				dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
				clock_frequency +=
				    fp_offset * etemp / (dtemp * dtemp);
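				/*
				 * Rough magnitudes with the defaults
				 * above and ULOGTOD(x) = 2^x: for
				 * sys_poll = 6 (64 s), allan_xpt = 11
				 * and mu = 64, etemp = 64 and dtemp =
				 * 4 * 16 * 64 = 4096, so each update
				 * moves the frequency by about
				 * offset * 64 / 4096^2, i.e. roughly
				 * offset / 262144.
				 */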
			}
			rstclock(EVNT_SYNC, fp_offset);
			if (fabs(fp_offset) < CLOCK_FLOOR)
				freq_cnt = 0;
			break;
		}
	}

#ifdef KERNEL_PLL
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable && freq_cnt == 0) {

		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		ZERO(ntv);
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
			ntv.modes = MOD_BITS;
			ntv.offset = var_long_from_dbl(
			    clock_offset, &ntv.modes);
#ifdef STA_NANO
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			if (ntv.constant < 0)
				ntv.constant = 0;

			ntv.esterror = usec_long_from_dbl(
				clock_jitter);
			ntv.maxerror = usec_long_from_dbl(
				sys_rootdelay / 2 + sys_rootdisp);
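			/*
			 * maxerror is the synchronization distance:
			 * half the root delay plus the root dispersion,
			 * converted to microseconds.
			 */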
			ntv.status = STA_PLL;

			/*
			 * Enable/disable the PPS if requested.
			 */
			if (hardpps_enable) {
				ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
				if (!(pll_status & STA_PPSTIME))
					sync_status("PPS enabled",
						pll_status,
						ntv.status);
			} else {
				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
				if (pll_status & STA_PPSTIME)
					sync_status("PPS disabled",
						pll_status,
						ntv.status);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}

		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		ntp_adj_ret = ntp_adjtime(&ntv);
		/*
		 * A squeal is a return status < 0, or a state change.
		 */
		if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
			kernel_status = ntp_adj_ret;
			ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, hardpps_enable, 0, __LINE__ - 1);
		}
		pll_status = ntv.status;
		clock_offset = dbl_from_var_long(ntv.offset, ntv.status);
		clock_frequency = FREQTOD(ntv.freq);

		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
			clock_jitter = dbl_from_var_long(
				ntv.jitter, ntv.status);
		}

#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
			}
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */

	/*
	 * Clamp the frequency within the tolerance range and calculate
	 * the frequency difference since the last update.
	 */
	if (fabs(clock_frequency) > NTP_MAXFREQ)
		msyslog(LOG_NOTICE,
		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
	dtemp = SQUARE(clock_frequency - drift_comp);
	if (clock_frequency > NTP_MAXFREQ)
		drift_comp = NTP_MAXFREQ;
	else if (clock_frequency < -NTP_MAXFREQ)
		drift_comp = -NTP_MAXFREQ;
	else
		drift_comp = clock_frequency;

	/*
	 * Calculate the wander as the exponentially weighted RMS
	 * frequency differences. Record the change for the frequency
	 * file update.
	 */
	etemp = SQUARE(clock_stability);
	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

	/*
	 * Here we adjust the time constant by comparing the current
	 * offset with the clock jitter. If the offset is less than the
	 * clock jitter times a constant, then the averaging interval is
	 * increased, otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode. Don't
	 * fiddle with the poll during the startup clamp period.
	 * [Bug 3615] also observe time gates to avoid eager stepping
	 */
	if (freq_cnt > 0) {
		tc_counter = 0;
		tc_twinlo  = current_time; 
		tc_twinhi  = current_time; 
	} else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
		tc_counter += sys_poll;
		if (tc_counter > CLOCK_LIMIT) {
			tc_counter = CLOCK_LIMIT;
			if (sys_poll < peer->maxpoll)
				sys_poll += (current_time >= tc_twinhi);
		}
	} else {
		tc_counter -= sys_poll << 1;
		if (tc_counter < -CLOCK_LIMIT) {
			tc_counter = -CLOCK_LIMIT;
			if (sys_poll > peer->minpoll)
				sys_poll -= (current_time >= tc_twinlo);
		}
	}
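	/*
	 * Net effect: the poll interval can step up only after the
	 * offset has stayed within CLOCK_PGATE (4) times the jitter
	 * long enough for tc_counter to accumulate to CLOCK_LIMIT
	 * (30), and it is driven back down twice as fast whenever the
	 * offset wanders outside that gate.
	 */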

	/*
	 * If the time constant has changed, update the poll variables.
	 *
	 * [bug 3615] also set new time gates
	 * The time limit for stepping down will be half the TC interval
	 * or 60 secs from now, whatever is bigger, and the step up time
	 * limit will be half the TC interval after the step down limit.
	 *
	 * The 'sys_poll' value affects the servo loop gain, and
	 * overshooting sys_poll slows it down unnecessarily.  Stepping
	 * down too fast also has bad effects.
	 *
	 * The 'tc_counter' dance itself is something that *should*
	 * happen *once* every (1 << sys_poll) seconds, I think, but
	 * that's not how it works right now, and adding time guards
	 * seems the least intrusive way to handle this.
	 */
	if (osys_poll != sys_poll) {
		u_int deadband = 1u << (sys_poll - 1);
		tc_counter = 0;
		tc_twinlo  = current_time + max(deadband, 60);
		tc_twinhi  = tc_twinlo + deadband;
		poll_update(peer, sys_poll, 0);
	}

	/*
	 * Yibbidy, yibbbidy, yibbidy; that'h all folks.
	 */
	record_loop_stats(clock_offset, drift_comp, clock_jitter,
	    clock_stability, sys_poll);
	DPRINTF(1, ("local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
		    clock_offset, clock_jitter, drift_comp * 1e6,
		    clock_stability * 1e6, sys_poll));
	return (rval);
#endif /* not LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;
	double	freq_adj;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period, the
	 * time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
	if (state != EVNT_SYNC) {
		offset_adj = 0.;
	} else if (freq_cnt > 0) {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}
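	/*
	 * With the daemon loop in charge this is an exponential decay
	 * of the residual offset: 1/(CLOCK_PLL * 2^sys_poll) of it is
	 * removed each second, e.g. about 1/1024 per second for
	 * sys_poll = 6, a time constant of roughly 17 minutes.
	 */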

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq().  Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}


/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
	int	trans,		/* new state */
	double	offset		/* new offset */
	)
{
	DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
		    current_time - clock_epoch, trans, sys_poll,
		    tc_counter));
	if (trans != state && trans != EVNT_FSET)
		report_event(trans, NULL, NULL);
	state = trans;
	last_offset = clock_offset = offset;
	clock_epoch = current_time;
}


/*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
 */
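/*
 * For example, an offset of 30 ms measured 300 s after the last clock
 * epoch implies a frequency error of 0.030 / 300 = 100 PPM.
 */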
static double
direct_freq(
	double	fp_offset
	)
{
	set_freq(fp_offset / (current_time - clock_epoch));

	return drift_comp;
}


/*
 * set_freq - set clock frequency correction
 *
 * Used to step the frequency correction at startup, possibly again once
 * the frequency is measured (that is, transitioning from EVNT_NSET to
 * EVNT_FSET), and finally to switch between daemon and kernel loop
 * discipline at runtime.
 *
 * When the kernel loop discipline is available but the daemon loop is
 * in use, the kernel frequency correction is disabled (set to 0) to
 * ensure drift_comp is applied by only one of the loops.
 */
static void
set_freq(
	double	freq		/* frequency update */
	)
{
	const char *	loop_desc;
	int ntp_adj_ret;

	(void)ntp_adj_ret; /* not always used below... */
	drift_comp = freq;
	loop_desc = "ntpd";
#ifdef KERNEL_PLL
	if (pll_control) {
		ZERO(ntv);
		ntv.modes = MOD_FREQUENCY;
		if (kern_enable) {
			loop_desc = "kernel";
			ntv.freq = DTOFREQ(drift_comp);
		}
		if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
		    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
		}
	}
#endif /* KERNEL_PLL */
	mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
	    drift_comp * 1e6);
}


#ifdef KERNEL_PLL
static void
start_kern_loop(void)
{
	static int atexit_done;
	int ntp_adj_ret;

	pll_control = TRUE;
	ZERO(ntv);
	ntv.modes = MOD_BITS;
	ntv.status = STA_PLL | STA_UNSYNC;
	ntv.maxerror = MAXDISPERSE * 1.0e6;
	ntv.esterror = MAXDISPERSE * 1.0e6;
	ntv.constant = sys_poll;
	/*             ^^^^^^^^ why is it that here constant is
	 * unconditionally set to sys_poll, whereas elsewhere it is
	 * modified depending on nanosecond vs. microsecond kernel?
	 */
#ifdef SIGSYS
	/*
	 * Use sigsetjmp() to save state and then call ntp_adjtime(); if
	 * it fails, then pll_trap() will set pll_control FALSE before
	 * returning control using siglongjmp().
	 */
	newsigsys.sa_handler = pll_trap;
	newsigsys.sa_flags = 0;
	if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
		msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
		pll_control = FALSE;
	} else {
		if (sigsetjmp(env, 1) == 0) {
			if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
			    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
			}
		}
		if (sigaction(SIGSYS, &sigsys, NULL)) {
			msyslog(LOG_ERR,
			    "sigaction() restore SIGSYS: %m");
			pll_control = FALSE;
		}
	}
#else /* SIGSYS */
	if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
	    ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
	}
#endif /* SIGSYS */

	/*
	 * Save the result status and light up an external clock
	 * if available.
	 */
	pll_status = ntv.status;
	if (pll_control) {
		if (!atexit_done) {
			atexit_done = TRUE;
			atexit(&stop_kern_loop);
		}
#ifdef STA_NANO
		if (pll_status & STA_CLK)
			ext_enable = TRUE;
#endif /* STA_NANO */
		report_event(EVNT_KERN, NULL,
	  	    "kernel time sync enabled");
	}
}
#endif	/* KERNEL_PLL */


#ifdef KERNEL_PLL
static void
stop_kern_loop(void)
{
	if (pll_control && kern_enable)
		report_event(EVNT_KERN, NULL,
		    "kernel time sync disabled");
}
#endif	/* KERNEL_PLL */


/*
 * select_loop() - choose kernel or daemon loop discipline.
 */
void
select_loop(
	int	use_kern_loop
	)
{
	if (kern_enable == use_kern_loop)
		return;
#ifdef KERNEL_PLL
	if (pll_control && !use_kern_loop)
		stop_kern_loop();
#endif
	kern_enable = use_kern_loop;
#ifdef KERNEL_PLL
	if (pll_control && use_kern_loop)
		start_kern_loop();
#endif
	/*
	 * If this loop selection change occurs after initial startup,
	 * call set_freq() to switch the frequency compensation to or
	 * from the kernel loop.
	 */
#ifdef KERNEL_PLL
	if (pll_control && loop_started)
		set_freq(drift_comp);
#endif
}


/*
 * huff-n'-puff filter
 */
void
huffpuff(void)
{
	int i;

	if (sys_huffpuff == NULL)
		return;

	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
	sys_huffpuff[sys_huffptr] = 1e9;
	sys_mindly = 1e9;
	for (i = 0; i < sys_hufflen; i++) {
		if (sys_huffpuff[i] < sys_mindly)
			sys_mindly = sys_huffpuff[i];
	}
}


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
	int	item,
	double	freq
	)
{
	int	i;
	double	ftemp;

	DPRINTF(2, ("loop_config: item %d freq %f\n", item, freq));
	switch (item) {

	/*
	 * We first assume the kernel supports the ntp_adjtime()
	 * syscall. If that syscall works, initialize the kernel time
	 * variables. Otherwise, continue leaving no harm behind.
	 */
	case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		if (mode_ntpdate)
			break;

		start_kern_loop();
#endif /* KERNEL_PLL */

		/*
		 * Initialize frequency if given; otherwise, begin frequency
		 * calibration phase.
		 */
		ftemp = init_drift_comp / 1e6;
		if (ftemp > NTP_MAXFREQ)
			ftemp = NTP_MAXFREQ;
		else if (ftemp < -NTP_MAXFREQ)
			ftemp = -NTP_MAXFREQ;
		set_freq(ftemp);
		if (freq_set)
			rstclock(EVNT_FSET, 0);
		else
			rstclock(EVNT_NSET, 0);
		loop_started = TRUE;
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#if 0		/* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("kernel time sync disabled",
				pll_status,
				ntv.status);
		   }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
		break;

	/*
	 * Tinker command variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_ALLAN:	/* Allan intercept (log2) (allan) */
		allan_xpt = (u_char)freq;
		break;

	case LOOP_CODEC:	/* audio codec frequency (codec) */
		clock_codec = freq / 1e6;
		break;

	case LOOP_PHI:		/* dispersion threshold (dispersion) */
		clock_phi = freq / 1e6;
		break;

	case LOOP_FREQ:		/* initial frequency (freq) */
		init_drift_comp = freq;
		freq_set++;
		break;

	case LOOP_HUFFPUFF:	/* huff-n'-puff length (huffpuff) */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = eallocarray(sys_hufflen, sizeof(sys_huffpuff[0]));
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
		break;

	case LOOP_PANIC:	/* panic threshold (panic) */
		clock_panic = freq;
		break;

	case LOOP_MAX:		/* step threshold (step) */
		clock_max_fwd = clock_max_back = freq;
		if (freq == 0 || freq > 0.5)
			select_loop(FALSE);
		break;

	case LOOP_MAX_BACK:	/* step threshold (step) */
		clock_max_back = freq;
		/*
		 * Leave using the kernel discipline code unless both
		 * limits are massive.  This assumes the reason to stop
		 * using it is that it's pointless, not that it goes wrong.
		 */
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MAX_FWD:	/* step threshold (step) */
		clock_max_fwd = freq;
		if (  (clock_max_back == 0 || clock_max_back > 0.5)
		   || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
			select_loop(FALSE);
		break;

	case LOOP_MINSTEP:	/* stepout threshold (stepout) */
		if (freq < CLOCK_MINSTEP)
			clock_minstep = CLOCK_MINSTEP;
		else
			clock_minstep = freq;
		break;

	case LOOP_TICK:		/* tick increment (tick) */
		set_sys_tick_precision(freq);
		break;

	case LOOP_LEAP:		/* not used, fall through */
	default:
		msyslog(LOG_NOTICE,
		    "loop_config: unsupported option %d", item);
	}
}


#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	pll_control = FALSE;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */