/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 *
 * $FreeBSD$
 */

#ifndef _VMCI_DEFS_H_
#define _VMCI_DEFS_H_

#include <sys/types.h>
#include <machine/atomic.h>

#include "vmci_kernel_defs.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/* Register offsets. */
#define VMCI_STATUS_ADDR		0x00
#define VMCI_CONTROL_ADDR		0x04
#define VMCI_ICR_ADDR			0x08
#define VMCI_IMR_ADDR			0x0c
#define VMCI_DATA_OUT_ADDR		0x10
#define VMCI_DATA_IN_ADDR		0x14
#define VMCI_CAPS_ADDR			0x18
#define VMCI_RESULT_LOW_ADDR		0x1c
#define VMCI_RESULT_HIGH_ADDR		0x20

/* Status register bits. */
#define VMCI_STATUS_INT_ON		0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET		0x1
#define VMCI_CONTROL_INT_ENABLE		0x2
#define VMCI_CONTROL_INT_DISABLE	0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL		0x1
#define VMCI_CAPS_GUESTCALL		0x2
#define VMCI_CAPS_DATAGRAM		0x4
#define VMCI_CAPS_NOTIFICATIONS		0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM		0x1
#define VMCI_ICR_NOTIFICATION		0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM		0x1
#define VMCI_IMR_NOTIFICATION		0x2

/* Interrupt type. */
typedef enum vmci_intr_type {
	VMCI_INTR_TYPE_INTX =	0,
	VMCI_INTR_TYPE_MSI =	1,
	VMCI_INTR_TYPE_MSIX =	2
} vmci_intr_type;

/*
 * Maximum MSI/MSI-X interrupt vectors in the device.
 */
#define VMCI_MAX_INTRS			2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
#define VMCI_INTR_DATAGRAM		0
#define VMCI_INTR_NOTIFICATION		1

/*
 * A single VMCI device has an upper limit of 128 MiB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY	(128 * 1024 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots, we can manually increment the maximum size of
 * available resource IDs.
 */

typedef uint32_t vmci_resource;

/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY		0
#define VMCI_GET_CONTEXT_ID		1
#define VMCI_SET_NOTIFY_BITMAP		2
#define VMCI_DOORBELL_LINK		3
#define VMCI_DOORBELL_UNLINK		4
#define VMCI_DOORBELL_NOTIFY		5
/*
 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
 * obsoleted by the removal of VM to VM communication.
 */
#define VMCI_DATAGRAM_REQUEST_MAP	6
#define VMCI_DATAGRAM_REMOVE_MAP	7
#define VMCI_EVENT_SUBSCRIBE		8
#define VMCI_EVENT_UNSUBSCRIBE		9
#define VMCI_QUEUEPAIR_ALLOC		10
#define VMCI_QUEUEPAIR_DETACH		11
/*
 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
 * WS 7.0/7.1 and ESX 4.1
 */
#define VMCI_HGFS_TRANSPORT		13
#define VMCI_UNITY_PBRPC_REGISTER	14
/*
 * This resource is used for VMCI socket control packets sent to the
 * hypervisor (CID 0) because RID 1 is already reserved.
 */
#define VSOCK_PACKET_HYPERVISOR_RID	15
#define VMCI_RESOURCE_MAX		16
/*
 * The core VMCI device functionality only requires the resource IDs of
 * VMCI_QUEUEPAIR_DETACH and below.
 */
#define VMCI_CORE_DEVICE_RESOURCE_MAX	VMCI_QUEUEPAIR_DETACH

/*
 * VMCI reserved host datagram resource IDs.
 * vsock control channel has resource id 1.
 */
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM	2

/* VMCI Ids. */
typedef uint32_t vmci_id;

struct vmci_id_range {
	int8_t	action;	/* VMCI_FA_X, for use in filters. */
	vmci_id	begin;	/* Beginning of range. */
	vmci_id	end;	/* End of range. */
};

struct vmci_handle {
	vmci_id	context;
	vmci_id	resource;
};

static inline struct vmci_handle
VMCI_MAKE_HANDLE(vmci_id cid, vmci_id rid)
{
	struct vmci_handle h;

	h.context = cid;
	h.resource = rid;
	return (h);
}

#define VMCI_HANDLE_TO_CONTEXT_ID(_handle)				\
	((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle)				\
	((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2)					\
	((_h1).context == (_h2).context && (_h1).resource == (_h2).resource)

#define VMCI_INVALID_ID			0xFFFFFFFF
static const struct vmci_handle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
	    VMCI_INVALID_ID};

#define VMCI_HANDLE_INVALID(_handle)					\
	VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)
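
/*
 * Usage sketch (illustrative only, not part of the original API): a handle
 * pairs a context ID with a resource ID; the IDs below are arbitrary.
 *
 *	struct vmci_handle h = VMCI_MAKE_HANDLE(3, 10);
 *
 * VMCI_HANDLE_TO_CONTEXT_ID(h) then yields 3, and VMCI_HANDLE_INVALID(h) is
 * false because neither field is VMCI_INVALID_ID.
 */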

/*
 * The defines below can be used to send anonymous requests.
 * An anonymous source also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID					\
	VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE						\
	VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID,			\
	VMCI_ANON_SRC_RESOURCE_ID)
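
/*
 * Illustrative sketch (an assumption, not upstream code): a caller sending a
 * request with no reply expected could stamp the source as anonymous:
 *
 *	struct vmci_handle src = VMCI_ANON_SRC_HANDLE;
 *
 * Both src.context and src.resource then hold VMCI_INVALID_ID.
 */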

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT		16

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID	0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID	1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID		2
#define VMCI_HOST_CONTEXT_INVALID_EVENT	((uintptr_t)~0)

#define VMCI_CONTEXT_IS_VM(_cid)					\
	(VMCI_INVALID_ID != (_cid) && (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID	0

/*
 *------------------------------------------------------------------------------
 *
 * VMCI error codes.
 *
 *------------------------------------------------------------------------------
 */

#define VMCI_SUCCESS_QUEUEPAIR_ATTACH		5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE		4
#define VMCI_SUCCESS_LAST_DETACH		3
#define VMCI_SUCCESS_ACCESS_GRANTED		2
#define VMCI_SUCCESS_ENTRY_DEAD			1
#define VMCI_SUCCESS				0LL
#define VMCI_ERROR_INVALID_RESOURCE		(-1)
#define VMCI_ERROR_INVALID_ARGS			(-2)
#define VMCI_ERROR_NO_MEM			(-3)
#define VMCI_ERROR_DATAGRAM_FAILED		(-4)
#define VMCI_ERROR_MORE_DATA			(-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS		(-6)
#define VMCI_ERROR_NO_ACCESS			(-7)
#define VMCI_ERROR_NO_HANDLE			(-8)
#define VMCI_ERROR_DUPLICATE_ENTRY		(-9)
#define VMCI_ERROR_DST_UNREACHABLE		(-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE		(-11)
#define VMCI_ERROR_INVALID_PRIV			(-12)
#define VMCI_ERROR_GENERIC			(-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED		(-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE		(-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE		(-16)
#define VMCI_ERROR_NO_PROCESS			(-17)
#define VMCI_ERROR_NO_DATAGRAM			(-18)
#define VMCI_ERROR_NO_RESOURCES			(-19)
#define VMCI_ERROR_UNAVAILABLE			(-20)
#define VMCI_ERROR_NOT_FOUND			(-21)
#define VMCI_ERROR_ALREADY_EXISTS		(-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED		(-23)
#define VMCI_ERROR_INVALID_SIZE			(-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED	(-25)
#define VMCI_ERROR_TIMEOUT			(-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE		(-27)
#define VMCI_ERROR_INCORRECT_IRQL		(-28)
#define VMCI_ERROR_EVENT_UNKNOWN		(-29)
#define VMCI_ERROR_OBSOLETE			(-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH		(-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET		(-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER		(-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED	(-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE		(-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA		(-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION		(-37)
#define VMCI_ERROR_MODULE_NOT_LOADED		(-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND		(-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY		(-40)
#define VMCI_ERROR_WOULD_BLOCK			(-41)

/* VMCI clients should return error codes within this range. */
#define VMCI_ERROR_CLIENT_MIN			(-500)
#define VMCI_ERROR_CLIENT_MAX			(-550)

/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT	(-1000)

#define VMCI_PATH_MAX				256

/* VMCI reserved events. */
typedef uint32_t vmci_event_type;

#define VMCI_EVENT_CTX_ID_UPDATE	0	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_CTX_REMOVED		1	// Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED		2	// Only applicable to guest
						// endpoints
#define VMCI_EVENT_QP_PEER_ATTACH	3	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_QP_PEER_DETACH	4	// Applicable to guest, host
						// and VMX
#define VMCI_EVENT_MEM_ACCESS_ON	5	// Applicable to VMX and vmk. On
						// vmk, this event has the
						// Context payload type
#define VMCI_EVENT_MEM_ACCESS_OFF	6	// Applicable to VMX and vmk.
						// Same as above for the payload
						// type
#define VMCI_EVENT_GUEST_PAUSED		7	// Applicable to vmk. This
						// event has the Context
						// payload type
#define VMCI_EVENT_GUEST_UNPAUSED	8	// Applicable to vmk. Same as
						// above for the payload type.
#define VMCI_EVENT_MAX			9

/*
 * Of the above events, a few are reserved for use in the VMX, and other
 * endpoints (guest and host kernel) should not use them. For the rest of the
 * events, we allow both host and guest endpoints to subscribe to them, to
 * maintain the same API for host and guest endpoints.
 */

#define VMCI_EVENT_VALID_VMX(_event)					\
	((_event) == VMCI_EVENT_QP_PEER_ATTACH ||			\
	(_event) == VMCI_EVENT_QP_PEER_DETACH ||			\
	(_event) == VMCI_EVENT_MEM_ACCESS_ON ||				\
	(_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event)					\
	((_event) < VMCI_EVENT_MAX &&					\
	(_event) != VMCI_EVENT_MEM_ACCESS_ON &&				\
	(_event) != VMCI_EVENT_MEM_ACCESS_OFF &&			\
	(_event) != VMCI_EVENT_GUEST_PAUSED &&				\
	(_event) != VMCI_EVENT_GUEST_UNPAUSED)
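
/*
 * Usage sketch (illustrative): a guest or host endpoint would gate event
 * subscriptions on the non-VMX filter, e.g.
 *
 *	if (!VMCI_EVENT_VALID(VMCI_EVENT_CTX_REMOVED))
 *		return (VMCI_ERROR_EVENT_UNKNOWN);
 *
 * VMCI_EVENT_CTX_REMOVED passes this check; VMCI_EVENT_GUEST_PAUSED does
 * not, since it is reserved for vmk.
 */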

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER		0

/*
 * VMCI coarse-grained privileges (per context or host process/endpoint). An
 * entity with the restricted flag is only allowed to interact with the
 * hypervisor and trusted entities.
 */
typedef uint32_t vmci_privilege_flags;

#define VMCI_PRIVILEGE_FLAG_RESTRICTED		0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED		0x02
#define VMCI_PRIVILEGE_ALL_FLAGS					\
	(VMCI_PRIVILEGE_FLAG_RESTRICTED | VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS			0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS	VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS		VMCI_PRIVILEGE_FLAG_TRUSTED

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX		1023

#define VMCI_DOMAIN_NAME_MAXLEN			32

#define VMCI_LGPFX				"vmci: "

/*
 * struct vmci_queue_header
 *
 * A Queue cannot stand by itself as designed. Each Queue's header contains a
 * pointer into itself (the producer_tail) and into its peer (consumer_head).
 * The reason for the separation is one of accessibility: Each end-point can
 * modify two things: where the next location to enqueue is within its produce_q
 * (producer_tail); and where the next dequeue location is in its consume_q
 * (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to guest; NOTE
 * that in the host both queue headers are mapped r/w). But, each end-point
 * needs read access to both Queue header structures in order to determine how
 * much space is used (or left) in the Queue. This is because for an end-point
 * to know how full its produce_q is, it needs to use the consumer_head that
 * points into the produce_q but -that- consumer_head is in the Queue header
 * for that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach a line
 * in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is at the head of the line is served
 * next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head, in which case consumer_head does not point to a valid byte
 * of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head then
 * the produce_q is empty.
 */
struct vmci_queue_header {
	/* All fields are 64bit and aligned. */
	struct vmci_handle	handle;		/* Identifier. */
	volatile uint64_t	producer_tail;	/* Offset in this queue. */
	volatile uint64_t	consumer_head;	/* Offset in peer queue. */
};
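
/*
 * Worked example (illustrative): take a 16-byte produce_q whose own header
 * holds producer_tail == 5 while the peer's consume_q header holds
 * consumer_head == 2 pointing back into it. Bytes [2, 5) are valid data,
 * byte 5 is the next enqueue slot, and 3 bytes are readable.
 */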

/*
 * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
 * size to be less than 4GB, and use 32bit atomic operations on the head and
 * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
 * is an atomic read-modify-write. This will cause traces to fire when a 32bit
 * consumer tries to read the producer's tail pointer, for example, because the
 * consumer has read-only access to the producer's tail pointer.
 *
 * We provide the following macros to invoke 32bit or 64bit atomic operations
 * based on the architecture the code is being compiled on.
 */

#ifdef __x86_64__
#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffffffffffff)
#define qp_atomic_read_offset(x)	atomic_load_64(x)
#define qp_atomic_write_offset(x, y)	atomic_store_64(x, y)
#else /* __x86_64__ */
	/*
	 * Wrappers below are being used because atomic_store_<type> operates
	 * on a specific <type>. Likewise for atomic_load_<type>
	 */

	static inline uint32_t
	type_safe_atomic_read_32(void *var)
	{
		return (atomic_load_32((volatile uint32_t *)(var)));
	}

	static inline void
	type_safe_atomic_write_32(void *var, uint32_t val)
	{
		atomic_store_32((volatile uint32_t *)(var), (uint32_t)(val));
	}

#define QP_MAX_QUEUE_SIZE_ARCH		CONST64U(0xffffffff)
#define qp_atomic_read_offset(x)	type_safe_atomic_read_32((void *)(x))
#define qp_atomic_write_offset(x, y)					\
	type_safe_atomic_write_32((void *)(x), (uint32_t)(y))
#endif /* __x86_64__ */
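
/*
 * Net effect of the dispatch above (both branches target little-endian x86):
 * qp_atomic_read_offset(&hdr->producer_tail) is a full 64bit atomic load on
 * amd64, while on i386 it degrades to a 32bit load of the low word, which is
 * why QP_MAX_QUEUE_SIZE_ARCH caps queue offsets at 4GB-1 there.
 */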

/*
 *------------------------------------------------------------------------------
 *
 * qp_add_pointer --
 *
 *     Helper to add a given offset to a head or tail pointer. Wraps the value
 *     of the pointer around the max size of the queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
qp_add_pointer(volatile uint64_t *var, size_t add, uint64_t size)
{
	uint64_t new_val = qp_atomic_read_offset(var);

	if (new_val >= size - add)
		new_val -= size;

	new_val += add;
	qp_atomic_write_offset(var, new_val);
}
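
/*
 * Worked example (illustrative): with size == 16, advancing an offset of 14
 * by add == 4 takes the early-wrap branch (14 >= 16 - 4); the unsigned
 * subtraction of size underflows, and the subsequent addition lands on
 * offset 2, i.e. (14 + 4) % 16.
 */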

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_producer_tail --
 *
 *     Helper routine to get the Producer Tail from the supplied queue.
 *
 * Results:
 *     The contents of the queue's producer tail.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_producer_tail(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->producer_tail));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_consumer_head --
 *
 *     Helper routine to get the Consumer Head from the supplied queue.
 *
 * Results:
 *     The contents of the queue's consumer head.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_consumer_head(const struct vmci_queue_header *q_header)
{
	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
	return (qp_atomic_read_offset(&qh->consumer_head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_producer_tail --
 *
 *     Helper routine to increment the Producer Tail. Fundamentally,
 *     qp_add_pointer() is used to manipulate the tail itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_producer_tail(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_consumer_head --
 *
 *     Helper routine to increment the Consumer Head. Fundamentally,
 *     qp_add_pointer() is used to manipulate the head itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_consumer_head(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

	qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_get_pointers --
 *
 *     Helper routine for getting the head and the tail pointer for a queue.
 *     Both queue headers are needed to get both pointers for one queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_get_pointers(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header, uint64_t *producer_tail,
    uint64_t *consumer_head)
{

	if (producer_tail)
		*producer_tail =
		    vmci_queue_header_producer_tail(produce_q_header);

	if (consumer_head)
		*consumer_head =
		    vmci_queue_header_consumer_head(consume_q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_reset_pointers --
 *
 *     Reset the tail pointer (of "this" queue) and the head pointer (of "peer"
 *     queue).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_reset_pointers(struct vmci_queue_header *q_header)
{

	qp_atomic_write_offset(&q_header->producer_tail, CONST64U(0));
	qp_atomic_write_offset(&q_header->consumer_head, CONST64U(0));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_init --
 *
 *     Initializes a queue's state (head & tail pointers).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_init(struct vmci_queue_header *q_header,
    const struct vmci_handle handle)
{

	q_header->handle = handle;
	vmci_queue_header_reset_pointers(q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_free_space --
 *
 *     Finds available free space in a produce queue to enqueue more data or
 *     reports an error if queue pair corruption is detected.
 *
 * Results:
 *     Free space size in bytes or an error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_free_space(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    const uint64_t produce_q_size)
{
	uint64_t free_space;
	uint64_t head;
	uint64_t tail;

	tail = vmci_queue_header_producer_tail(produce_q_header);
	head = vmci_queue_header_consumer_head(consume_q_header);

	if (tail >= produce_q_size || head >= produce_q_size)
		return (VMCI_ERROR_INVALID_SIZE);

	/*
	 * Deduct 1 to prevent the tail from becoming equal to the head, which
	 * would be ambiguous: head == tail is reserved to mean the queue is
	 * empty.
	 */

	if (tail >= head)
		free_space = produce_q_size - (tail - head) - 1;
	else
		free_space = head - tail - 1;

	return (free_space);
}
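
/*
 * Worked example (illustrative): for a 16-byte queue with tail == 5 and
 * head == 2, the tail >= head branch yields 16 - (5 - 2) - 1 == 12 free
 * bytes; the deducted byte is the sentinel that keeps a full queue
 * distinguishable from an empty one.
 */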

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_buf_ready --
 *
 *     vmci_queue_header_free_space() does all the heavy lifting of determining
 *     the number of free bytes in a Queue. This routine then subtracts that
 *     size (plus the one byte that is always kept empty) from the full size of
 *     the Queue so the caller knows how many bytes are ready to be dequeued.
 *
 * Results:
 *     On success, available data size in bytes (up to MAX_INT64).
 *     On failure, appropriate error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_buf_ready(const struct vmci_queue_header *consume_q_header,
    const struct vmci_queue_header *produce_q_header,
    const uint64_t consume_q_size)
{
	int64_t free_space;

	free_space = vmci_queue_header_free_space(consume_q_header,
	    produce_q_header, consume_q_size);
	if (free_space < VMCI_SUCCESS)
		return (free_space);
	else
		return (consume_q_size - free_space - 1);
}
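
/*
 * Worked example (illustrative): with the 16-byte queue above reporting 12
 * free bytes, this routine returns 16 - 12 - 1 == 3 bytes ready to be
 * dequeued, matching the [2, 5) span of valid data.
 */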

#endif /* !_VMCI_DEFS_H_ */