--- /dev/null
+#
+# Acess2 VirtIO driver
+#
+
+OBJ = virtio.o
+NAME = VirtIO
+
+include ../../Makefile.tpl
+
--- /dev/null
+/*
+ * Acess2 Kernel Common
+ * - By John Hodge (thePowersGang)
+ *
+ * virtio.h
+ * - VirtIO Common Header
+ *
+ * Reference: LinuxKernel:/Documentation/virtual/virtio-spec.txt
+ */
+#ifndef _VIRTIO__VIRTIO_H_
+#define _VIRTIO__VIRTIO_H_
+
+typedef struct sVirtIO_Dev tVirtIO_Dev;
+typedef struct sVirtIO_Buf tVirtIO_Buf;
+
+typedef enum eVirtIO_DeviceClasses tVirtIO_DeviceClass;
+
+/**
+ * Function called when the device returns a queue entry to the guest via the used ring
+ *
+ * \param Dev	Device that owns the queue
+ * \param Index	Index of the head descriptor of the returned chain
+ * \param UsedBytes	Number of bytes the device reports as used (the used ring's Len field)
+ * \param Handle	Handle registered when the buffer was queued
+ */
+typedef int (*tVirtIO_QueueCallback)(tVirtIO_Dev *Dev, int Index, size_t UsedBytes, void *Handle);
+
+#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+#define VIRTIO_F_RING_INDIRECT_DESC (1 << 28)
+#define VIRTIO_F_RING_EVENT_IDX (1 << 29)
+
+enum eVirtIO_DeviceClasses
+{
+ VIRTIO_DEVCLASS_NETWORK,
+};
+
+// === FUNCTIONS ===
+extern tVirtIO_Dev *VirtIO_InitDev(Uint16 IOBase, Uint IRQ, Uint32 Features, int NQueues, size_t DataSize);
+extern Uint32 VirtIO_GetFeatures(tVirtIO_Dev *Dev);
+extern Uint32 VirtIO_GetDevConfig(tVirtIO_Dev *Dev, int Size, Uint8 Offset);
+extern void *VirtIO_GetDataPtr(tVirtIO_Dev *Dev);
+extern void VirtIO_RemoveDev(tVirtIO_Dev *Dev);
+/**
+ * \brief Sets the Queue Callback
+ *
+ * The queue callback is called when the device writes an entry to the used ring.
+ *
+ * \param NoAutoRel Keep descriptors in ring buffer until explicitly popped and released
+ */
+extern int VirtIO_SetQueueCallback(tVirtIO_Dev *Dev, int QueueID, tVirtIO_QueueCallback Callback, int NoAutoRel);
+extern tVirtIO_Buf *VirtIO_SendBuffers(tVirtIO_Dev *Dev, int QueueID, int nBufs, size_t Sizes[], const void *Ptrs[], void *Handle);
+extern tVirtIO_Buf *VirtIO_ReceiveBuffer(tVirtIO_Dev *Dev, int QueueID, size_t Size, void *Ptr, void *Handle);
+extern tVirtIO_Buf	*VirtIO_PopBuffer(tVirtIO_Dev *Dev, int QueueID, size_t *Size, const void **Ptr);
+/**
+ * \brief Get the next buffer in a chain
+ */
+extern tVirtIO_Buf *VirtIO_GetNextBuffer(tVirtIO_Buf *Buf);
+/**
+ * \brief Get the data pointer registered for this buffer
+ * \note The returned pointer is the virtual address supplied when the buffer was queued,
+ *       so it is only valid in the address space that queued it.
+ */
+extern const void *VirtIO_GetBufferPtr(tVirtIO_Buf *Buf, size_t *Size);
+/**
+ * \brief Get the device for a buffer
+ */
+extern tVirtIO_Dev *VirtIO_GetBufferDev(tVirtIO_Buf *Buf);
+/**
+ * \brief Release all qdescs associated with a buffer into the free pool
+ */
+extern void VirtIO_ReleaseBuffer(tVirtIO_Buf *Buffer);
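+
+/*
+ * Rough usage sketch, based on the VirtIONet driver in this tree.
+ * Queue numbers, feature bits and the MyDrv_* names are illustrative only.
+ *
+ *	tVirtIO_Dev *dev = VirtIO_InitDev(iobase, irq, my_features, 2, sizeof(tMyDrv_Dev));
+ *	// Queue 0: device-writable buffers, kept until the driver explicitly releases them
+ *	VirtIO_SetQueueCallback(dev, 0, MyDrv_RXCallback, 1);
+ *	// Queue 1: device-readable buffers, released automatically once used
+ *	VirtIO_SetQueueCallback(dev, 1, MyDrv_TXCallback, 0);
+ *	// Hand a page to the device to fill
+ *	VirtIO_ReceiveBuffer(dev, 0, PAGE_SIZE, rx_page, rx_handle);
+ *	// Queue a two-part message for the device to read
+ *	VirtIO_SendBuffers(dev, 1, 2, sizes, ptrs, tx_handle);
+ */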
+
+#endif
+
+
--- /dev/null
+/*
+ * Acess2 VirtIO Common
+ * - By John Hodge (thePowersGang)
+ *
+ * virtio_hw.h
+ * - VirtIO Hardware Header
+ *
+ * Reference: LinuxKernel:/Documentation/virtual/virtio-spec.txt
+ */
+#ifndef _VIRTIO__VIRTIO_HW_H_
+#define _VIRTIO__VIRTIO_HW_H_
+
+//#if _MODULE_NAME_ != "VirtIOCommon"
+//#error "This header is a VirtIO internal"
+//#endif
+
+#if 0
+typedef struct sVirtIO_Header tVirtIO_Header;
+
+struct sVirtIO_Header
+{
+ volatile Uint32 DeviceFeatures; // R
+ volatile Uint32 GuestFeatures; // RW
+ Uint32 QueueAddress; // RW
+ volatile Uint16 QueueSize; // R
+ volatile Uint16 QueueSelect; // RW
+ volatile Uint16 QueueNotify; // RW
+ volatile Uint8 DeviceStatus; // RW
+ volatile Uint8 ISRStatus; // R
+};
+#endif
+enum eVirtIO_IOAddrs
+{
+ VIRTIO_REG_DEVFEAT = 0x00, // R
+ VIRTIO_REG_GUESTFEAT = 0x04, // RW
+ VIRTIO_REG_QUEUEADDR = 0x08, // RW
+ VIRTIO_REG_QUEUESIZE = 0x0C, // R
+ VIRTIO_REG_QUEUESELECT = 0x0E, // RW
+ VIRTIO_REG_QUEUENOTIFY = 0x10, // RW
+ VIRTIO_REG_DEVSTS = 0x12, // RW
+ VIRTIO_REG_ISRSTS = 0x13, // R
+
+ VIRTIO_REG_DEVSPEC_0 = 0x14,
+
+ VIRTIO_REG_MSIX_CONFIGV = 0x14, // RW
+ VIRTIO_REG_MSIX_QUEUEV = 0x16, // RW
+
+ VIRTIO_REG_DEVSPEC_1 = 0x18,
+};
+
+enum eVirtIO_DeviceStatuses
+{
+ VIRTIO_DEVSTS_RESET = 0x00, // Reset device
+ VIRTIO_DEVSTS_ACKNOWLEDGE = 0x01, // Acknowledged device
+	VIRTIO_DEVSTS_DRIVER	= 0x02,	// Driver available
+ VIRTIO_DEVSTS_DRIVER_OK = 0x04, // Driver initialised
+ VIRTIO_DEVSTS_FAILED = 0x80, // Something went wrong
+};
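+
+// Typical initialisation sequence (as implemented by VirtIO_InitDev in virtio.c):
+//  1. Write 0 (reset), then ACKNOWLEDGE, then DRIVER to VIRTIO_REG_DEVSTS
+//  2. Read VIRTIO_REG_DEVFEAT and write the accepted subset to VIRTIO_REG_GUESTFEAT
+//  3. For each queue: select it, read its size, allocate the rings, and write the
+//     ring's page number to VIRTIO_REG_QUEUEADDR
+//  4. Write DRIVER_OK to VIRTIO_REG_DEVSTS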
+
+enum eVirtIO_ISRBits
+{
+ VIRTIO_ISR_QUEUE = 0x01,
+};
+
+// VirtIO Ring Structure
+// +0         : Ring descriptors (count given by tVirtIO_Header::QueueSize)
+// +QS*16     : Available ring
+// +PAD[4096] : Used ring (starts at the next 4096-byte boundary)
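+//
+// Worked example for a queue of 256 entries (sizes follow from the structures below;
+// illustrative only):
+//   descriptors : 256 * 16        = 4096 bytes at +0
+//   available   : 2+2 + 2*256 + 2 =  518 bytes at +4096  (+2 is the event index word)
+//   used        : 2+2 + 8*256 + 2 = 2054 bytes at +8192  (next page boundary)
+// i.e. a three-page allocation, matching the sizing done in VirtIO_InitDev.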
+
+#define VRING_AVAIL_F_NO_INTERRUPT 0x1
+
+struct sVirtIO_AvailRing
+{
+ // [0]: Disable IRQ on descriptor use
+ Uint16 Flags;
+ Uint16 Idx;
+ Uint16 Ring[]; // tVirtIO_Header::QueueSize entries
+ //Uint16 UsedEvent;
+};
+
+struct sVirtIO_UsedRing
+{
+ // [0]: Do not notify when descriptors added
+ Uint16 Flags;
+ Uint16 Idx;
+ struct {
+ Uint32 ID;
+ Uint32 Len;
+ } Ring[];
+ // Uint16 AvailEvent;
+};
+
+#define VRING_DESC_F_NEXT 0x1
+#define VRING_DESC_F_WRITE 0x2
+#define VRING_DESC_F_INDIRECT 0x4
+
+struct sVirtIO_RingDesc
+{
+ Uint64 Addr;
+ Uint32 Len;
+ // [0]: Continue using the `Next` field
+	// [1]: Write-only (instead of Read-only)
+ // [2]: Indirect buffer (list of buffer descriptors)
+ Uint16 Flags;
+ Uint16 Next; // Index
+};
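+
+// Example of a two-descriptor chain, roughly what VirtIO_SendBuffers builds for a
+// header + payload message (indices and lengths are illustrative only):
+//   desc[3]: Addr=<phys of header>,  Len=10,   Flags=NEXT, Next=7
+//   desc[7]: Addr=<phys of payload>, Len=1500, Flags=0     (device-readable, end of chain)
+// Receive buffers instead carry Flags=WRITE so the device may fill them
+// (see VirtIO_ReceiveBuffer).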
+
+#endif
+
--- /dev/null
+/*
+ * Acess2 VirtIO Common Code
+ * - By John Hodge (thePowersGang)
+ *
+ * virtio.c
+ * - Core
+ */
+#define DEBUG 1
+#define VERSION 0x100
+#include <acess.h>
+#include <modules.h>
+#include <semaphore.h>
+#include "include/virtio.h"
+#include "include/virtio_hw.h"
+
+// === TYPES ===
+typedef struct sVirtIO_Queue tVirtIO_Queue;
+
+// === STRUCTURES ===
+struct sVirtIO_Buf
+{
+ Uint16 Idx;
+ Uint16 Queue;
+ tVirtIO_Dev *Dev;
+ void *Handle;
+ const void *BufPtr;
+};
+
+struct sVirtIO_Queue
+{
+ int Size;
+ tVirtIO_QueueCallback Callback;
+ int NoAutoRel;
+
+ volatile struct sVirtIO_RingDesc *Entries;
+ tShortSpinlock lAvailQueue;
+ volatile struct sVirtIO_AvailRing *Avail;
+ Uint16 NextUsedPop;
+ Uint16 LastSeenUsed;
+ volatile struct sVirtIO_UsedRing *Used;
+
+ tSemaphore FreeDescsSem;
+ tShortSpinlock lFreeList;
+ Uint16 FirstUnused;
+
+ tVirtIO_Buf Buffers[];
+};
+
+struct sVirtIO_Dev
+{
+ Uint IRQ;
+ Uint16 IOBase;
+ Uint16 DevCfgBase;
+
+ void *DataPtr;
+
+ int nQueues;
+ struct sVirtIO_Queue *Queues[];
+};
+
+// === PROTOTYPES ===
+ int VirtIO_Install(char **Arguments);
+ int VirtIO_Cleanup(void);
+void VirtIO_IRQHandler(int IRQ, void *Ptr);
+
+// === GLOBALS ===
+MODULE_DEFINE(0, VERSION, VirtIOCommon, VirtIO_Install, VirtIO_Cleanup, NULL);
+
+// === CODE ===
+int VirtIO_Install(char **Arguments)
+{
+ return 0;
+}
+
+int VirtIO_Cleanup(void)
+{
+ Log_Warning("VirtIO", "TODO: Cleanup");
+ return 1;
+}
+
+// --- API ---
+// - Device management
+tVirtIO_Dev *VirtIO_InitDev(Uint16 IOBase, Uint IRQ, Uint32 Features, int MaxQueues, size_t DataSize)
+{
+ tVirtIO_Dev *ret;
+
+ // Reset and init device
+ outb(IOBase + VIRTIO_REG_DEVSTS, 0);
+ outb(IOBase + VIRTIO_REG_DEVSTS, VIRTIO_DEVSTS_ACKNOWLEDGE);
+ outb(IOBase + VIRTIO_REG_DEVSTS, VIRTIO_DEVSTS_DRIVER);
+
+ // Negotiate Features
+ Uint32 support_feat = ind(IOBase + VIRTIO_REG_DEVFEAT);
+ outd(IOBase + VIRTIO_REG_GUESTFEAT, Features & support_feat);
+ LOG("Features: (Dev 0x%08x, Driver 0x%08x)", support_feat, Features);
+
+ // Create structure
+ ret = malloc( offsetof(tVirtIO_Dev, Queues[MaxQueues]) + DataSize );
+ ret->IRQ = IRQ;
+ ret->IOBase = IOBase;
+ ret->nQueues = MaxQueues;
+ ret->DataPtr = &ret->Queues[MaxQueues];
+
+ // TODO: MSI-X makes this move
+ ret->DevCfgBase = IOBase + VIRTIO_REG_DEVSPEC_0;
+
+ // Discover virtqueues
+ for( int i = 0; i < MaxQueues; i ++ )
+ {
+ outw(IOBase + VIRTIO_REG_QUEUESELECT, i);
+ size_t qsz = inw(IOBase + VIRTIO_REG_QUEUESIZE);
+ LOG("Queue #%i: QSZ = %i", i, qsz);
+ if( qsz == 0 ) {
+ ret->Queues[i] = NULL;
+ continue ;
+ }
+ // TODO: Assert that qsz is a power of 2
+
+ tVirtIO_Queue *queue = calloc( offsetof(tVirtIO_Queue, Buffers[qsz]), 1 );
+ queue->Size = qsz;
+ queue->FirstUnused = 0;
+
+ Semaphore_Init(&queue->FreeDescsSem, qsz, qsz, "VirtIO", "FreeDescs");
+
+ // Allocate virtqueue spaces
+ size_t sz1 = qsz*16 + offsetof(struct sVirtIO_AvailRing, Ring[qsz])+2;
+ size_t sz2 = offsetof(struct sVirtIO_UsedRing, Ring[qsz]) + 2;
+ sz1 = (sz1 + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
+ LOG(" sz{1,2} = 0x%x,0x%x", sz1, sz2);
+ queue->Entries = MM_AllocDMA( (sz1+sz2+0xFFF)>>12, 32+12, NULL );
+ queue->Avail = (void*)(queue->Entries + qsz);
+ queue->Used = (void*)((char*)queue->Entries + sz1);
+
+ // Clear and prepare unused list
+ memset((void*)queue->Entries, 0, sz1 + sz2);
+ for( int j = 0; j < qsz; j ++ )
+ {
+ queue->Entries[j].Flags = 1;
+ queue->Entries[j].Next = j+1;
+
+ queue->Buffers[j].Idx = j;
+ queue->Buffers[j].Queue = i;
+ queue->Buffers[j].Dev = ret;
+ }
+ queue->Entries[qsz-1].Flags = 0;
+
+ ret->Queues[i] = queue;
+
+ Uint32 queueaddr = MM_GetPhysAddr(queue->Entries) / 4096;
+ LOG(" Phys %P", MM_GetPhysAddr(queue->Entries));
+ outd(IOBase + VIRTIO_REG_QUEUEADDR, queueaddr);
+ ASSERTC(queueaddr, ==, ind(IOBase + VIRTIO_REG_QUEUEADDR));
+ }
+
+ // Register IRQ Handler
+ IRQ_AddHandler(IRQ, VirtIO_IRQHandler, ret);
+ Uint8 isr = inb(IOBase + VIRTIO_REG_ISRSTS);
+ LOG("isr = %x", isr);
+
+ // Start
+ outb(IOBase + VIRTIO_REG_DEVSTS, VIRTIO_DEVSTS_DRIVER_OK);
+
+ return ret;
+}
+
+Uint32 VirtIO_GetFeatures(tVirtIO_Dev *Dev)
+{
+ return ind(Dev->IOBase + VIRTIO_REG_GUESTFEAT);
+}
+Uint32 VirtIO_GetDevConfig(tVirtIO_Dev *Dev, int Size, Uint8 Offset)
+{
+ switch(Size)
+ {
+ case 8:
+ return inb(Dev->DevCfgBase + Offset);
+ case 16:
+ return inw(Dev->DevCfgBase + Offset);
+ case 32:
+ return ind(Dev->DevCfgBase + Offset);
+ }
+ return 0;
+}
+void *VirtIO_GetDataPtr(tVirtIO_Dev *Dev)
+{
+ return Dev->DataPtr;
+}
+void VirtIO_RemoveDev(tVirtIO_Dev *Dev)
+{
+ UNIMPLEMENTED();
+}
+
+/**
+ * \brief Sets the Queue Callback
+ *
+ * The queue callback is called when the device returns a descriptor chain via the used ring:
+ * a) a device-readable (RO) chain has been consumed and retired, or
+ * b) a device-writable (WO) chain has been filled and handed back to the guest
+ */
+int VirtIO_SetQueueCallback(tVirtIO_Dev *Dev, int QueueID, tVirtIO_QueueCallback Callback, int NoAutoRel)
+{
+ ASSERTCR(QueueID, <, Dev->nQueues, -1);
+
+ Dev->Queues[QueueID]->Callback = Callback;
+ Dev->Queues[QueueID]->NoAutoRel = NoAutoRel;
+
+ if( !Callback && NoAutoRel ) {
+		Log_Warning("VirtIO", "%p:%i has callback==NULL with auto-release disabled",
+ Dev, QueueID);
+ }
+
+ return 0;
+}
+
+int VirtIO_int_AllocQueueEntry(tVirtIO_Queue *Queue)
+{
+ if( Semaphore_Wait(&Queue->FreeDescsSem, 1) != 1 ) {
+ return -1;
+ }
+
+ SHORTLOCK(&Queue->lFreeList);
+ int idx = Queue->FirstUnused;
+ ASSERT( Queue->Entries[idx].Flags & VRING_DESC_F_NEXT );
+ Queue->FirstUnused = Queue->Entries[idx].Next;
+ SHORTREL(&Queue->lFreeList);
+
+ return idx;
+}
+
+tVirtIO_Buf *VirtIO_int_AllocBuf(tVirtIO_Queue *Queue, const void *Ptr, size_t Size, Uint Flags, Uint16 Next)
+{
+ int idx = VirtIO_int_AllocQueueEntry(Queue);
+ tVirtIO_Buf *buf = &Queue->Buffers[idx];
+ ASSERTC(idx, ==, buf->Idx);
+
+ LOG("%p:%i[%i] = {%P+0x%x}",
+ buf->Dev, buf->Queue, buf->Idx,
+ MM_GetPhysAddr(Ptr), Size);
+
+ Queue->Entries[idx].Addr = MM_GetPhysAddr(Ptr);
+ Queue->Entries[idx].Len = Size;
+ Queue->Entries[idx].Flags = Flags;
+ Queue->Entries[idx].Next = Next;
+
+ buf->Handle = NULL;
+ buf->BufPtr = Ptr;
+
+ return buf;
+}
+
+tVirtIO_Buf *VirtIO_int_AllocBufV(tVirtIO_Queue *Queue, const char *Ptr, size_t Size, Uint Flags, Uint16 Next)
+{
+ if( ((tVAddr)Ptr & (PAGE_SIZE-1)) + Size > PAGE_SIZE*2 )
+ {
+ Log_Error("VirtIO", ">2 page buffers are not supported");
+ return NULL;
+ }
+
+ tVirtIO_Buf *ret;
+
+ tPAddr phys = MM_GetPhysAddr(Ptr);
+ if( phys + Size-1 != MM_GetPhysAddr( Ptr + Size-1 ) )
+ {
+ size_t fp_size = PAGE_SIZE-(phys%PAGE_SIZE);
+ tVirtIO_Buf *last = VirtIO_int_AllocBuf(Queue, Ptr+fp_size, Size-fp_size, Flags, Next);
+ ret = VirtIO_int_AllocBuf(Queue, Ptr, fp_size, Flags|VRING_DESC_F_NEXT, last->Idx);
+ }
+ else
+ {
+ ret = VirtIO_int_AllocBuf(Queue, Ptr, Size, Flags, Next);
+ }
+ return ret;
+}
+
+/*
+ * Append a ring descriptor to the available ring
+ */
+void VirtIO_int_AddAvailBuf(tVirtIO_Queue *Queue, tVirtIO_Buf *Buf)
+{
+ __sync_synchronize();
+ SHORTLOCK(&Queue->lAvailQueue);
+ Queue->Avail->Ring[ Queue->Avail->Idx & (Queue->Size-1) ] = Buf->Idx;
+ Queue->Avail->Idx ++;
+ SHORTREL(&Queue->lAvailQueue);
+
+ // Notify
+ __sync_synchronize();
+ // TODO: Delay notifications
+ tVirtIO_Dev *dev = Buf->Dev;
+ outw(dev->IOBase + VIRTIO_REG_QUEUENOTIFY, Buf->Queue);
+ LOG("Notifying %p:%i", Buf->Dev, Buf->Queue);
+}
+
+// Send a set of RO buffers
+tVirtIO_Buf *VirtIO_SendBuffers(tVirtIO_Dev *Dev, int QueueID, int nBufs, size_t Sizes[], const void *Ptrs[], void *Handle)
+{
+ tVirtIO_Queue *queue = Dev->Queues[QueueID];
+ tVirtIO_Buf *prev = NULL;
+
+	// Allocate buffers for each non-contiguous region
+	// - these come from the queue's unallocated pool
+	// (iterate in reverse so each entry can chain to the already-allocated following entry)
+ size_t totalsize = 0;
+ for( int i = nBufs; i --; )
+ {
+ if( prev )
+ prev = VirtIO_int_AllocBufV(queue, Ptrs[i], Sizes[i], VRING_DESC_F_NEXT, prev->Idx);
+ else
+ prev = VirtIO_int_AllocBufV(queue, Ptrs[i], Sizes[i], 0, 0);
+ totalsize += Sizes[i];
+ }
+ LOG("Total size 0x%x", totalsize);
+
+	// The head descriptor of the chain (the last one allocated above) carries
+	// the caller's handle; all others keep Handle == NULL
+	prev->Handle = Handle;
+	
+	// Add the chain head to the available ring
+ VirtIO_int_AddAvailBuf(queue, prev);
+
+ return prev;
+}
+
+// Supply a single WO buffer for the device
+tVirtIO_Buf *VirtIO_ReceiveBuffer(tVirtIO_Dev *Dev, int QueueID, size_t Size, void *Ptr, void *Handle)
+{
+ LOG("%p:%i - Add %p+0x%x for RX", Dev, QueueID, Ptr, Size);
+ tVirtIO_Queue *queue = Dev->Queues[QueueID];
+ tVirtIO_Buf *ret = VirtIO_int_AllocBufV(queue, Ptr, Size, VRING_DESC_F_WRITE, 0);
+ ret->Handle = Handle;
+
+ VirtIO_int_AddAvailBuf(queue, ret);
+ return ret;
+}
+
+tVirtIO_Buf *VirtIO_PopBuffer(tVirtIO_Dev *Dev, int QueueID, size_t *Size, const void **Ptr)
+{
+ ASSERTCR(QueueID, <, Dev->nQueues, NULL);
+ tVirtIO_Queue *queue = Dev->Queues[QueueID];
+
+ // TODO: Lock
+ if( queue->NextUsedPop == queue->Used->Idx )
+ return NULL;
+ int qidx = queue->NextUsedPop;
+ queue->NextUsedPop ++;
+
+ int idx = queue->Used->Ring[qidx].ID;
+ if( Size )
+ *Size = queue->Used->Ring[qidx].Len;
+ if( Ptr ) {
+ *Ptr = queue->Buffers[idx].BufPtr;
+ ASSERTC(MM_GetPhysAddr(*Ptr), ==, queue->Entries[idx].Addr);
+ }
+ return &queue->Buffers[idx];
+}
+
+const void *VirtIO_GetBufferPtr(tVirtIO_Buf *Buf, size_t *Size)
+{
+ tVirtIO_Queue *queue = Buf->Dev->Queues[Buf->Queue];
+ if(Size)
+ *Size = queue->Entries[Buf->Idx].Len;
+ return Buf->BufPtr;
+}
+tVirtIO_Dev *VirtIO_GetBufferDev(tVirtIO_Buf *Buf)
+{
+ return Buf->Dev;
+}
+
+void VirtIO_int_ReleaseQDesc(tVirtIO_Queue *Queue, Uint16 Index)
+{
+ LOG("Release QDesc %p:%i into free pool",
+ Queue, Index);
+ SHORTLOCK(&Queue->lFreeList);
+ Queue->Entries[Index].Next = Queue->FirstUnused;
+ Queue->Entries[Index].Flags = VRING_DESC_F_NEXT;
+ Queue->FirstUnused = Index;
+ SHORTREL(&Queue->lFreeList);
+ Semaphore_Signal(&Queue->FreeDescsSem, 1);
+}
+
+/**
+ * \brief Releases all qdescs in the buffer to the free list
+ */
+void VirtIO_ReleaseBuffer(tVirtIO_Buf *Buffer)
+{
+ int idx = Buffer->Idx;
+ tVirtIO_Queue *queue = Buffer->Dev->Queues[Buffer->Queue];
+
+ LOG("Releasing chain at %p:%i/%i",
+ Buffer->Dev, Buffer->Queue, Buffer->Idx);
+
+ int has_next;
+ do {
+ has_next = !!(queue->Entries[idx].Flags & VRING_DESC_F_NEXT);
+ int next_idx = queue->Entries[idx].Next;
+		if( has_next )
+			ASSERTC(next_idx, !=, idx);
+
+ VirtIO_int_ReleaseQDesc(queue, idx);
+
+ idx = next_idx;
+ } while(has_next);
+}
+
+void VirtIO_int_ProcessUsedList(tVirtIO_Dev *Dev, tVirtIO_Queue *Queue, int UsedIdx)
+{
+ Uint16 qent = Queue->Used->Ring[UsedIdx].ID;
+ size_t len = Queue->Used->Ring[UsedIdx].Len;
+ LOG("QEnt %i (0x%x bytes) callback w/ Handle=%p",
+ qent, len, Queue->Buffers[qent].Handle);
+ if( Queue->Callback )
+ Queue->Callback(Dev, qent, len, Queue->Buffers[qent].Handle);
+
+ if( !Queue->NoAutoRel )
+ {
+		// Return the descriptors to the free pool
+ VirtIO_ReleaseBuffer(&Queue->Buffers[qent]);
+ if(Queue->NextUsedPop == UsedIdx)
+ Queue->NextUsedPop ++;
+ }
+}
+
+void VirtIO_IRQHandler(int IRQ, void *Ptr)
+{
+ tVirtIO_Dev *Dev = Ptr;
+ Uint8 isr = inb(Dev->IOBase + VIRTIO_REG_ISRSTS);
+ LOG("IRQ for %p - ISR = 0x%x", Dev, isr);
+
+ // ISR == 0: Interrupt was not from this card
+ if( isr == 0 )
+ return ;
+
+ // Check each queue
+ for( int i = 0; i < Dev->nQueues; i ++ )
+ {
+ tVirtIO_Queue *queue = Dev->Queues[i];
+ // Check 'used' ring
+ LOG("Queue %i Used: %i ?!= %i (Avail: %i)",
+ i, queue->LastSeenUsed, queue->Used->Idx, queue->Avail->Idx);
+ while( queue->LastSeenUsed != queue->Used->Idx )
+ {
+ int idx = queue->LastSeenUsed;
+ queue->LastSeenUsed ++;
+ VirtIO_int_ProcessUsedList(Dev, queue, idx);
+ }
+ }
+}
+
--- /dev/null
+#
+# Acess2 VirtIO Network driver
+#
+
+OBJ = virtio-net.o
+NAME = VirtIONet
+
+include ../../Makefile.tpl
+
--- /dev/null
+/*
+ * Acess2 VirtIO Network Driver
+ * - By John Hodge (thePowersGang)
+ *
+ * virtio-net.c
+ * - Driver Core
+ */
+#define DEBUG 1
+#define VERSION VER2(1,0)
+#include <acess.h>
+#include <modules.h>
+#include <semaphore.h>
+#include <drv_pci.h>
+#include <IPStack/include/adapters_api.h>
+#include "virtio-net.h"
+#include <Libraries/VirtIO/include/virtio.h>
+
+#define NRXBUFS 4
+
+// === TYPEDEFS ===
+typedef struct sVirtIONet_Dev tVirtIONet_Dev;
+
+// === STRUCTURES ===
+struct sVirtIONet_Dev
+{
+ Uint32 Features;
+ tSemaphore RXPacketSem;
+ void *RXBuffers[NRXBUFS];
+};
+
+// === PROTOTYPES ===
+ int VirtIONet_Install(char **Arguments);
+ int VirtIONet_Cleanup(void);
+void VirtIONet_AddCard(Uint16 IOBase, Uint IRQ);
+ int VirtIONet_RXQueueCallback(tVirtIO_Dev *Dev, int ID, size_t UsedBytes, void *Handle);
+tIPStackBuffer *VirtIONet_WaitForPacket(void *Ptr);
+ int VirtIONet_TXQueueCallback(tVirtIO_Dev *Dev, int ID, size_t UsedBytes, void *Handle);
+ int VirtIONet_SendPacket(void *Ptr, tIPStackBuffer *Buffer);
+
+// === GLOBALS ===
+MODULE_DEFINE(0, VERSION, VirtIONet, VirtIONet_Install, VirtIONet_Cleanup, "IPStack", "VirtIOCommon", NULL);
+tIPStack_AdapterType gVirtIONet_AdapterType = {
+ .Name = "VirtIONet",
+	.Type = ADAPTERTYPE_ETHERNET_1G,	// TODO: Differentiate different wire protocols and speeds
+ .Flags = ADAPTERFLAG_OFFLOAD_MAC, // TODO: IP/TCP/UDP checksum offloading
+ .SendPacket = VirtIONet_SendPacket,
+ .WaitForPacket = VirtIONet_WaitForPacket
+};
+
+// === CODE ===
+int VirtIONet_Install(char **Arguments)
+{
+ int pcidev = -1;
+ // Find network devices
+ while( (pcidev = PCI_GetDeviceByClass(0x020000, 0xFF0000, pcidev)) != -1 )
+ {
+ Uint16 ven, dev;
+ PCI_GetDeviceInfo(pcidev, &ven, &dev, NULL);
+ LOG("Device %i: %x/%x", pcidev, ven, dev);
+ // 0x1AF4:(0x1000-0x103F) are VirtIO devices
+ if( ven != 0x1AF4 || (dev & 0xFFC0) != 0x1000 )
+ continue ;
+ Uint16 subsys_id;
+ PCI_GetDeviceSubsys(pcidev, &ven, &subsys_id);
+ if( subsys_id != 1 ) {
+			Log_Notice("VirtIONet", "Device has the network PCI class but subsystem ID %i != 1 (network), ignoring", subsys_id);
+ continue ;
+ }
+
+ Uint8 irq = PCI_GetIRQ(pcidev);
+ // TODO: Detect bad IRQ
+
+		Uint32	iobase = PCI_GetBAR(pcidev, 0);
+		if( iobase == 0 ) {
+			// Unset/invalid BAR0, ignore this device
+			Log_Warning("VirtIONet", "Device %i has no IO BAR0, ignoring", pcidev);
+			continue ;
+		}
+
+ VirtIONet_AddCard(iobase & ~1, irq);
+ }
+ return 0;
+}
+
+int VirtIONet_Cleanup(void)
+{
+ Log_Warning("VirtIONet", "TODO: Clean up before module unload");
+ return 0;
+}
+
+void VirtIONet_AddCard(Uint16 IOBase, Uint IRQ)
+{
+ ENTER("xMMIOBase xIRQ", IOBase, IRQ);
+ // Should be a VirtIO Network device
+ tVirtIO_Dev *dev = VirtIO_InitDev(
+ IOBase, IRQ,
+ VIRTIO_NET_F_MAC|VIRTIO_NET_F_STATUS|VIRTIO_NET_F_CSUM|VIRTIO_NET_F_MRG_RXBUF
+ |VIRTIO_F_NOTIFY_ON_EMPTY,
+ 3,
+ sizeof(struct sVirtIONet_Dev)
+ );
+	if( !dev ) {
+		Log_Warning("VirtIONet", "VirtIO_InitDev failed for card at IO 0x%x", IOBase);
+		LEAVE('-');
+		return ;
+	}
+ tVirtIONet_Dev *ndev = VirtIO_GetDataPtr(dev);
+ Semaphore_Init(&ndev->RXPacketSem, 0, 1, "VirtIONet", "RXSem");
+ ndev->Features = VirtIO_GetFeatures(dev);
+
+ Uint8 mac[6];
+ if( ndev->Features & VIRTIO_NET_F_MAC ) {
+ for( int i = 0; i < 6; i ++ )
+ mac[i] = VirtIO_GetDevConfig(dev, 8, i);
+ Log_Log("VirtIONet", "Card %x,%i - MAC %02x:%02x:%02x:%02x:%02x:%02x",
+ IOBase, IRQ,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ }
+ else {
+		// Generate a random locally-administered MAC
+		// (second hex digit of the first octet must be 2, 6, A or E)
+ mac[0] = 0x0A;
+ mac[1] = 0xCE;
+ mac[2] = 0x55;
+ mac[3] = rand() & 0xFF;
+ mac[4] = rand() & 0xFF;
+ mac[5] = rand() & 0xFF;
+ Log_Log("VirtIONet", "Card %x,%i - Random MAC %02x:%02x:%02x:%02x:%02x:%02x",
+ IOBase, IRQ,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ }
+
+ // NoAutoRel=1 : Keep RX buffers about until worker is done with them
+ VirtIO_SetQueueCallback(dev, 0, VirtIONet_RXQueueCallback, 1);
+ VirtIO_SetQueueCallback(dev, 1, VirtIONet_TXQueueCallback, 0);
+
+ // Set up RX buffers
+ for( int i = 0; i < NRXBUFS; i ++ )
+ {
+ ndev->RXBuffers[i] = MM_AllocDMA(1, -1, NULL);
+ VirtIO_ReceiveBuffer(dev, 0, PAGE_SIZE, ndev->RXBuffers[i], NULL);
+ }
+
+ // Register with IPStack
+ // TODO: Save the returned pointer to do deregister later
+ IPStack_Adapter_Add(&gVirtIONet_AdapterType, dev, mac);
+ LEAVE('-');
+}
+
+int VirtIONet_RXQueueCallback(tVirtIO_Dev *Dev, int ID, size_t UsedBytes, void *Handle)
+{
+ tVirtIONet_Dev *NDev = VirtIO_GetDataPtr(Dev);
+ Semaphore_Signal(&NDev->RXPacketSem, 1);
+ // 1: Don't pop the qdesc
+ return 1;
+}
+
+void VirtIONet_ReleaseRX(void *Arg, size_t HeadLen, size_t FootLen, const void *Data)
+{
+ tVirtIO_Buf *buf = Arg;
+ tVirtIO_Dev *dev = VirtIO_GetBufferDev(buf);
+ // Re-add RX buffer
+ VirtIO_ReleaseBuffer(buf);
+	// RX buffers are single, page-aligned pages (allocated in VirtIONet_AddCard),
+	// so masking the data pointer down to a page boundary recovers the buffer start
+	void *bufptr = (void*)((tVAddr)Data & ~(PAGE_SIZE-1));
+ VirtIO_ReceiveBuffer(dev, 0, PAGE_SIZE, bufptr, NULL);
+}
+
+tIPStackBuffer *VirtIONet_WaitForPacket(void *Ptr)
+{
+ tVirtIO_Dev *VIODev = Ptr;
+ tVirtIONet_Dev *NDev = VirtIO_GetDataPtr(VIODev);
+
+ if( Semaphore_Wait(&NDev->RXPacketSem, 1) != 1 ) {
+ return NULL;
+ }
+
+ size_t size;
+ const void *buf;
+ tVirtIO_Buf *id = VirtIO_PopBuffer(VIODev, VIRTIONET_QUEUE_RX, &size, &buf);
+ if( id == NULL )
+ {
+		// Pop failed, nothing there (not necessarily an error)
+ return NULL;
+ }
+
+ const tVirtIONet_PktHdr *hdr = buf;
+ int nbufs = (NDev->Features & VIRTIO_NET_F_MRG_RXBUF) ? hdr->NumBuffers : 1;
+ size_t dataofs = (NDev->Features & VIRTIO_NET_F_MRG_RXBUF) ? sizeof(*hdr) : sizeof(*hdr)-2;
+
+ ASSERTCR(nbufs, >=, 1, NULL);
+ ASSERTCR(size, >, dataofs, NULL);
+
+ tIPStackBuffer *ret = IPStack_Buffer_CreateBuffer(nbufs);
+ IPStack_Buffer_AppendSubBuffer(ret, 0, size-dataofs, (const char*)buf + dataofs, VirtIONet_ReleaseRX, id);
+
+ // TODO: This will break if descriptors end up being chained
+
+ for( int i = 1; i < nbufs; i ++ )
+ {
+ while( NULL == (id = VirtIO_PopBuffer(VIODev, VIRTIONET_QUEUE_RX, &size, &buf)) )
+ Semaphore_Wait(&NDev->RXPacketSem, 1);
+ IPStack_Buffer_AppendSubBuffer(ret, 0, size, buf, VirtIONet_ReleaseRX, id);
+ }
+ return ret;
+}
+
+int VirtIONet_TXQueueCallback(tVirtIO_Dev *Dev, int ID, size_t UsedBytes, void *Handle)
+{
+ if( Handle ) {
+ LOG("Unlock TX'd buffer %p", Handle);
+ IPStack_Buffer_UnlockBuffer(Handle);
+ }
+ return 0;
+}
+
+int VirtIONet_SendPacket(void *Ptr, tIPStackBuffer *Buffer)
+{
+ tVirtIO_Dev *VIODev = Ptr;
+ tVirtIONet_Dev *NDev = VirtIO_GetDataPtr(VIODev);
+
+ int nBufs = 0;
+ for( int idx = -1; (idx = IPStack_Buffer_GetBuffer(Buffer, idx, NULL, NULL)) != -1; )
+ nBufs ++;
+
+ tVirtIONet_PktHdr hdr;
+
+ hdr.Flags = 0;
+ // GSO (TODO: Acess needs to support this)
+ hdr.GSOType = 0;
+ hdr.HeaderLen = 0;
+ hdr.GSOSize = 0;
+ // IP/TCP checksumming?
+ hdr.CSumStart = 0;
+ hdr.CSumOffset = 0;
+
+ hdr.NumBuffers = 0;
+
+ size_t buflens[1+nBufs];
+ const void *bufptrs[1+nBufs];
+ buflens[0] = sizeof(hdr) - ((NDev->Features & VIRTIO_NET_F_MRG_RXBUF) ? 0 : 2);
+ bufptrs[0] = &hdr;
+ int i = 1;
+ for( int idx = -1; (idx = IPStack_Buffer_GetBuffer(Buffer, idx, &buflens[i], &bufptrs[i])) != -1; )
+ i ++;
+
+	// Lock the buffer now; VirtIONet_TXQueueCallback unlocks it once the device
+	// has consumed the descriptors
+	IPStack_Buffer_LockBuffer(Buffer);
+	VirtIO_SendBuffers(VIODev, VIRTIONET_QUEUE_TX, nBufs+1, buflens, bufptrs, Buffer);
+	
+	// Wait until TX completes - the second lock blocks until the callback's unlock
+	IPStack_Buffer_LockBuffer(Buffer);
+	IPStack_Buffer_UnlockBuffer(Buffer);
+
+ return 0;
+}
+
--- /dev/null
+/*
+ * Acess2 VirtIO Network Driver
+ * - By John Hodge (thePowersGang)
+ *
+ * virtio-net.h
+ * - Hardware definitions
+ */
+#ifndef _VIRTIONET__VIRTIO_NET_H_
+#define _VIRTIONET__VIRTIO_NET_H_
+
+typedef struct sVirtIONet_PktHdr tVirtIONet_PktHdr;
+
+enum eVirtIO_FeatureBits
+{
+ VIRTIO_NET_F_CSUM = (1 << 0), // Checksum offloading
+ VIRTIO_NET_F_GUEST_CSUM = (1 << 1), // ??? "Guest handles packets with partial checksum"
+ VIRTIO_NET_F_MAC = (1 << 5), // Device has given MAC address
+ // TCP Segmentation Offloading / UDP Fragmentation Offloading
+ VIRTIO_NET_F_GUEST_TSO4 = (1 << 7), // Guest can receive TSOv4
+ VIRTIO_NET_F_GUEST_TSO6 = 1 << 8, // Guest can receive TSOv6
+	VIRTIO_NET_F_GUEST_TSOE	= 1 << 9,	// Guest can receive TSO with ECN (Explicit Congestion Notification)
+	VIRTIO_NET_F_GUEST_UFO	= 1 << 10,	// Guest can receive UFO
+ VIRTIO_NET_F_HOST_TSO4 = 1 << 11, // Device can receive TSOv4
+ VIRTIO_NET_F_HOST_TSO6 = 1 << 12, // Device can receive TSOv6
+ VIRTIO_NET_F_HOST_TSOE = 1 << 13, // Device can receive TSO with ECN
+	VIRTIO_NET_F_HOST_UFO	= 1 << 14,	// Device can receive UFO
+
+	VIRTIO_NET_F_MRG_RXBUF	= 1 << 15,	// Guest can merge receive buffers
+	VIRTIO_NET_F_STATUS	= 1 << 16,	// Configuration status field is available
+ // Control Channel
+	VIRTIO_NET_F_CTRL_VQ	= 1 << 17,	// Control VQ is available
+ VIRTIO_NET_F_CTRL_RX = 1 << 18, // Control VQ RX mode is supported
+ VIRTIO_NET_F_CTRL_VLAN = 1 << 19, // Control channel VLAN filtering
+	VIRTIO_NET_F_GUEST_ANNOUNCE = 1 << 21,	// "Guest can send gratuitous packets"
+};
+
+#define VIRTIO_NET_S_LINK_UP 1
+#define VIRTIO_NET_S_ANNOUNCE 2
+
+struct sVirtIONet_Cfg
+{
+ Uint8 MACAddr[6]; // only valid if VIRTIO_NET_F_MAC
+ Uint16 Status; // only valid if VIRTIO_NET_F_STATUS
+};
+
+enum eVirtIONet_Queues
+{
+ VIRTIONET_QUEUE_RX,
+ VIRTIONET_QUEUE_TX,
+ VIRTIONET_QUEUE_CTRL, // only valid if VIRTIO_NET_F_CTRL_VQ
+};
+
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Checksum needs to be performed
+enum eVirtIONet_GSOTypes
+{
+ VIRTIO_NET_HDR_GSO_NONE,
+ VIRTIO_NET_HDR_GSO_TCPV4,
+ VIRTIO_NET_HDR_GSO_UDP,
+ VIRTIO_NET_HDR_GSO_TCPV6,
+ VIRTIO_NET_HDR_GSO_ECN = 0x80
+};
+
+struct sVirtIONet_PktHdr
+{
+ Uint8 Flags;
+ Uint8 GSOType;
+ Uint16 HeaderLen;
+ Uint16 GSOSize;
+ Uint16 CSumStart;
+ Uint16 CSumOffset; // Offset from CSumStart
+
+ Uint16 NumBuffers; // Only if VIRTIO_NET_F_MRG_RXBUF
+};
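+
+// Note: every frame on the RX/TX queues is preceded by this header. When
+// VIRTIO_NET_F_MRG_RXBUF has not been negotiated the NumBuffers field is absent,
+// hence the sizeof(hdr)-2 adjustments in virtio-net.c.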
+
+struct sVirtIONet_CmdHdr
+{
+ Uint8 Class; // Command class (RX, MAC, VLAN, Announce)
+ Uint8 Command; // Actual command (RxPromisc,RxAllMulti,,MACSet,,VLANAdd,VLANDel)
+ Uint8 Data[];
+ // Uint8 Ack;
+};
+
+#endif
+