> man operating_systems
Центр информации по операционным системам :: Форумы :: Операционные системы :: GNU/Linux
 
<< Предыдущая тема | Следующая тема >>
VMware для ядер старше 2.6.11
Модераторы: Roman I Khimov, Wanderer, Dron
Автор Добавил
cmp
Понедельник 08.08.2005 22:27
ID пользователя #279
Зарегистрирован: Понедельник 18.04.2005 15:35
Сообщений: 131
Начиная с версии ядра 2.6.11 сетевой модуль vmware отказывался импортироваться
в ядро,.. такая же история повторилась когда я наконец-то на днях установил
2.6.12. Поскольку в новостных лентах я не встречал каких-либо упоминаний о
этой проблеме я поковырял исходники ядра и далее привожу содержимое файла
userif.c из архива vmnet.tar из пакета vmware, где эта проблема исправленна.

/* **********************************************************
* Copyright 1998 VMware, Inc. All rights reserved. -- VMware Confidential
* **********************************************************/

#include "driver-config.h"

#define EXPORT_SYMTAB

#include <linux/kernel.h>#include <linux/version.h>#include <linux/sched.h>#ifdef KERNEL_2_2
# include <linux/slab.h>#else
# include <linux/malloc.h>#endif
#include <linux/poll.h>#include <linux/netdevice.h>#include <linux/etherdevice.h>#include <linux/skbuff.h>#include <linux/if_ether.h>#include <linux/sockios.h>#include "compat_sock.h"

#define __KERNEL_SYSCALLS__
#include <asm/io.h>#include <linux/proc_fs.h>#include <linux/file.h>#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
#include <net/checksum.h>#endif

#include "vnetInt.h"

#include "compat_uaccess.h"
#include "compat_highmem.h"
#include "compat_mm.h"
#include "pgtbl.h"
#include "compat_wait.h"
#include "vmnetInt.h"
#include "vm_atomic.h"

typedef struct VNetUserIFStats {
unsigned read;
unsigned written;
unsigned queued;
unsigned droppedDown;
unsigned droppedMismatch;
unsigned droppedOverflow;
unsigned droppedLargePacket;
} VNetUserIFStats;

typedef struct VNetUserIF {
VNetPort port;
struct sk_buff_head packetQueue;
uint32* pollPtr;
Atomic_uint32* actPtr;
uint32 pollMask;
uint32 actMask;
uint32 pollMaskAct;
uint32 clusterCount;
uint32* recvClusterPtr;
wait_queue_head_t waitQueue;
struct page* actPage;
struct page* pollPage;
struct page* recvClusterPage;
VNetUserIFStats stats;
} VNetUserIF;

static void VNetUserIfUnsetupNotify(VNetUserIF *userIf);
static int VNetUserIfSetupNotify(VNetUserIF *userIf, VNet_Notify2 *vn);

/*
*-----------------------------------------------------------------------------
*
* UserifLockPage --
*
* Lock in core the physical page associated to a valid virtual
* address --hpreg
*
* Results:
* The page structure on success
* NULL on failure: memory pressure. Retry later
*
* Side effects:
* Loads page into memory
* Pre-2.4.19 version may temporarily lock another physical page
*
*-----------------------------------------------------------------------------
*/

static INLINE struct page *
UserifLockPage(VA addr) // IN
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
struct page *page = NULL;
int retval;

down_read(¤t->mm->mmap_sem);
retval = get_user_pages(current, current->mm, addr,
1, 1, 0, &page, NULL);
up_read(¤t->mm->mmap_sem);

if (retval != 1) {
return NULL;
}

return page;
#else
struct page *page;
struct page *check;
volatile int c;

/*
* Establish a virtual to physical mapping by touching the physical
* page. Because the address is valid, there is no need to check the return
* value here --hpreg
*/
compat_get_user(c, (char *)addr);

page = PgtblVa2Page(addr);
if (page == pfn_to_page(0)) {
/* The mapping went away --hpreg */
return NULL;
}

/* Lock the physical page --hpreg */
get_page(page);

check = PgtblVa2Page(addr);
if (check != page) {
/*
* The mapping went away or was modified, so we didn't lock the right
* physical page --hpreg
*/

/* Unlock the physical page --hpreg */
put_page(page);

return NULL;
}

/* We locked the right physical page --hpreg */
return page;
#endif
}

static INLINE int
VNetUserIfMapUint32Ptr(VA uAddr, struct page **p, uint32 **ptr)
{
if (verify_area(VERIFY_READ, (void *)uAddr, sizeof (uint32)) ||
verify_area(VERIFY_WRITE, (void *)uAddr, sizeof (uint32)) ||
(((uAddr + sizeof(uint32)) & ~(PAGE_SIZE - 1)) !=
(uAddr & ~(PAGE_SIZE - 1)))) {
return -EINVAL;
}

*p = UserifLockPage(uAddr);
if (*p == NULL) {
return -EAGAIN;
}

*ptr = (uint32 *)((char *)kmap(*p) + (uAddr & (PAGE_SIZE - 1)));
return 0;
}

/*
*-----------------------------------------------------------------------------
*
* VNetUserIfSetupNotify --
*
* Sets up notification by filling in pollPtr, actPtr, and recvClusterPtr
* fields.
*
* Results:
* 0 on success
* < 0 on failure: the actual value determines the type of failure
*
* Side effects:
* Fields pollPtr, actPtr, recvClusterPtr, pollPage, actPage, and
* recvClusterPage are filled in VNetUserIf structure.
*
*-----------------------------------------------------------------------------
*/

static INLINE int
VNetUserIfSetupNotify(VNetUserIF *userIf, // IN
VNet_Notify2 *vn) // IN
{
int retval;

if (userIf->pollPage || userIf->actPage || userIf->recvClusterPage) {
LOG(0, (KERN_DEBUG "vmnet: Notification mechanism already active\n"));
return -EBUSY;
}

if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &userIf->pollPage,
&userIf->pollPtr)) < 0) {
return retval;
}

if ((retval = VNetUserIfMapUint32Ptr((VA)vn->actPtr, &userIf->actPage,
(uint32 **)&userIf->actPtr)) < 0) {
VNetUserIfUnsetupNotify(userIf);
return retval;
}

if (vn->version >= 2) {
if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
&userIf->recvClusterPage,
&userIf->recvClusterPtr)) < 0) {
VNetUserIfUnsetupNotify(userIf);
return retval;
}
} else {
userIf->recvClusterPage = NULL;
userIf->recvClusterPtr = &userIf->clusterCount;
}

userIf->pollMask = userIf->pollMaskAct = vn->pollMask;
userIf->actMask = vn->actMask;
return 0;
}

/*
*----------------------------------------------------------------------
*
* VNetUserIfUnsetupNotify --
*
* Destroys permanent mapping for notify structure provided by user.
*
* Results:
* None.
*
* Side effects:
* Fields pollPtr, actPtr, recvClusterPtr, etc. in VNetUserIf
* structure are cleared.
*
*----------------------------------------------------------------------
*/

static void
VNetUserIfUnsetupNotify(VNetUserIF *userIf) // IN
{
if (userIf->pollPage) {
kunmap(userIf->pollPage);
put_page(userIf->pollPage);
}
if (userIf->actPage) {
kunmap(userIf->actPage);
put_page(userIf->actPage);
}
if (userIf->recvClusterPage) {
kunmap(userIf->recvClusterPage);
put_page(userIf->recvClusterPage);
}
userIf->pollPtr = NULL;
userIf->pollPage = NULL;
userIf->actPtr = NULL;
userIf->actPage = NULL;
userIf->recvClusterPtr = &userIf->clusterCount;
userIf->recvClusterPage = NULL;
userIf->pollMask = 0;
userIf->pollMaskAct = 0;
userIf->actMask = 0;
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfFree --
*
* Free the user interface port.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static void
VNetUserIfFree(VNetJack *this) // IN
{
VNetUserIF *userIf = (VNetUserIF*)this;
struct sk_buff *skb;

for (;;) {
skb = skb_dequeue(&userIf->packetQueue);
if (skb == NULL) {
break;
}
dev_kfree_skb(skb);
}

if (userIf->pollPtr) {
VNetUserIfUnsetupNotify(userIf);
userIf->clusterCount = 0;
}

if (this->procEntry) {
VNetProc_RemoveEntry(this->procEntry, NULL);
}

kfree(userIf);
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfReceive --
*
* This jack is receiving a packet. Take appropriate action.
*
* Results:
* None.
*
* Side effects:
* Frees skb.
*
*----------------------------------------------------------------------
*/

static void
VNetUserIfReceive(VNetJack *this, // IN
struct sk_buff *skb) // IN
{
VNetUserIF *userIf = (VNetUserIF*)this->private;
uint8 *dest = SKB_2_DESTMAC(skb);

if (!UP_AND_RUNNING(userIf->port.flags)) {
userIf->stats.droppedDown++;
goto drop_packet;
}

if (!VNetPacketMatch(dest,
userIf->port.paddr,
userIf->port.ladrf,
userIf->port.flags)) {
userIf->stats.droppedMismatch++;
goto drop_packet;
}

if (skb_queue_len(&userIf->packetQueue) >= VNET_MAX_QLEN) {
userIf->stats.droppedOverflow++;
goto drop_packet;
}

if (skb->len > ETHER_MAX_QUEUED_PACKET) {
userIf->stats.droppedLargePacket++;
goto drop_packet;
}

userIf->stats.queued++;

skb_queue_tail(&userIf->packetQueue, skb);
if (userIf->pollPtr) {
*userIf->pollPtr |= userIf->pollMask;
if (skb_queue_len(&userIf->packetQueue) >= (*userIf->recvClusterPtr)) {
Atomic_Or(userIf->actPtr, userIf->actMask);
}
}
wake_up(&userIf->waitQueue);
return;

drop_packet:
dev_kfree_skb(skb);
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfProcRead --
*
* Callback for read operation on this userif entry in vnets proc fs.
*
* Results:
* Length of read operation.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static int
VNetUserIfProcRead(char *page, // IN/OUT: buffer to write into
char **start, // OUT: 0 if file < 4k, else offset into
// page
off_t off, // IN: offset of read into the file
int count, // IN: maximum number of bytes to read
int *eof, // OUT: TRUE if there is nothing more to
// read
void *data) // IN: client data - not used
{
VNetUserIF *userIf = (VNetUserIF*)data;
int len = 0;

if (!userIf) {
return len;
}

len += VNetPrintPort(&userIf->port, page+len);

len += sprintf(page+len, "read %u written %u queued %u ",
userIf->stats.read,
userIf->stats.written,
userIf->stats.queued);

len += sprintf(page+len,
"dropped.down %u dropped.mismatch %u "
"dropped.overflow %u dropped.largePacket %u",
userIf->stats.droppedDown,
userIf->stats.droppedMismatch,
userIf->stats.droppedOverflow,
userIf->stats.droppedLargePacket);

len += sprintf(page+len, "\n");

*start = 0;
*eof = 1;
return len;
}


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4)
/*
*----------------------------------------------------------------------
*
* VNetCsumCopyDatagram --
*
* Copy part of datagram to userspace doing checksum at same time.
*
* Do not mark this function INLINE, it is recursive! With all gcc's
* released up to now (<= gcc-3.3.1) inlining this function just
* consumes 120 more bytes of code and goes completely mad on
* register allocation, storing almost everything in the memory.
*
* Results:
* folded checksum (non-negative value) on success,
* -EINVAL if offset is too big,
* -EFAULT if buffer is an invalid area
*
* Side effects:
* Data copied to the buffer.
*
*----------------------------------------------------------------------
*/

static int
VNetCsumCopyDatagram(const struct sk_buff *skb, // IN: skb to copy
unsigned int offset, // IN: how many bytes skip
char *buf) // OUT: where to copy data
{
unsigned int csum;
int err = 0;
int len = skb_headlen(skb) - offset;
char *curr = buf;
const skb_frag_t *frag;

/*
* Something bad happened. We skip only up to skb->nh.raw, and skb->nh.raw
* must be in the header, otherwise we are in the big troubles.
*/
if (len < 0) {
return -EINVAL;
}

csum = csum_and_copy_to_user(skb->data + offset, curr, len, 0, &err);
if (err) {
return err;
}
curr += len;

for (frag = skb_shinfo(skb)->frags;
frag != skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
frag++) {
if (frag->size > 0) {
unsigned int tmpCsum;
const void *vaddr;

vaddr = kmap(frag->page);
tmpCsum = csum_and_copy_to_user(vaddr + frag->page_offset,
curr, frag->size, 0, &err);
kunmap(frag->page);
if (err) {
return err;
}
csum = csum_block_add(csum, tmpCsum, curr - buf);
curr += frag->size;
}
}

for (skb = skb_shinfo(skb)->frag_list; skb != NULL; skb = skb->next) {
int tmpCsum;

tmpCsum = VNetCsumCopyDatagram(skb, 0, curr);
if (tmpCsum < 0) {
return tmpCsum;
}
/* Folded checksum must be inverted before we can use it */
csum = csum_block_add(csum, tmpCsum ^ 0xFFFF, curr - buf);
curr += skb->len;
}
return csum_fold(csum);
}
#endif


/*
*----------------------------------------------------------------------
*
* VNetCopyDatagramToUser --
*
* Copy complete datagram to the user space. Fill correct checksum
* into the copied datagram if nobody did it yet.
*
* Results:
* On success byte count, on failure -EFAULT.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static INLINE_SINGLE_CALLER int
VNetCopyDatagramToUser(const struct sk_buff *skb, // IN
char *buf, // OUT
size_t count) // IN
{
if (count > skb->len) {
count = skb->len;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 4)
if (copy_to_user(buf, skb->data, count)) {
return -EFAULT;
}
#else

struct iovec iov = { .iov_base = buf };

/*
* If truncation occurs, we do not bother with checksumming - caller cannot
* verify checksum anyway in such case, and copy without checksum it is
* faster.
*/
if (skb->pkt_type == PACKET_OUTGOING && /* Packet must be outgoing */
skb->ip_summed == CHECKSUM_HW && /* Without checksum */
skb->h.raw != skb->nh.raw && /* We must know where header is */
skb->len == count) { /* No truncation may occur */
size_t skl;
int csum;
u_int16_t csum16;

skl = skb->h.raw - skb->data;

iov.iov_len = skl;
if (skb_copy_datagram_iovec(skb, 0, &iov, skl)) {
return -EFAULT;
}
csum = VNetCsumCopyDatagram(skb, skl, buf + skl);
if (csum < 0) {
return csum;
}
csum16 = csum;
if (copy_to_user(buf + skl + skb->csum, &csum16, sizeof csum16)) {
return -EFAULT;
}
} else {
iov.iov_len = count;
if (skb_copy_datagram_iovec(skb, 0, &iov, count)) {
return -EFAULT;
}
}
#endif
return count;
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfRead --
*
* The virtual network's read file operation. Reads the next pending
* packet for this network connection.
*
* Results:
* On success the len of the packet received,
* else if no packet waiting and nonblocking 0,
* else -errno.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static int
VNetUserIfRead(VNetPort *port, // IN
struct file *filp, // IN
char *buf, // OUT
size_t count) // IN
{
VNetUserIF *userIf = (VNetUserIF*)port->jack.private;
struct sk_buff *skb;
int ret;
DECLARE_WAITQUEUE(wait, current);

add_wait_queue(&userIf->waitQueue, &wait);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
mb();
skb = skb_peek(&userIf->packetQueue);
if (skb && (skb->len > count)) {
skb = NULL;
ret = -EMSGSIZE;
break;
}
ret = -EAGAIN;
skb = skb_dequeue(&userIf->packetQueue);

if (userIf->pollPtr) {
if (skb_queue_empty(&userIf->packetQueue)) {
*userIf->pollPtr &= ~userIf->pollMask;
}
#if 0
/*
* Disable this for now since the monitor likes to assert that
* actions are present and thus can't cope with them disappearing
* out from under it. See bug 47760. -Jeremy. 22 July 2004
*/

if (skb_queue_len(&userIf->packetQueue) < (*userIf->recvClusterPtr) &&
(Atomic_Read(userIf->actPtr) & userIf->actMask) != 0) {
Atomic_And(userIf->actPtr, ~userIf->actMask);
}
#endif
}

if (skb != NULL || filp->f_flags & O_NONBLOCK) {
break;
}
ret = -EINTR;
if (signal_pending(current)) {
break;
}
schedule();
}
current->state = TASK_RUNNING;
remove_wait_queue(&userIf->waitQueue, &wait);
if (! skb) {
return ret;
}

userIf->stats.read++;

count = VNetCopyDatagramToUser(skb, buf, count);
dev_kfree_skb(skb);
return count;
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfWrite --
*
* The virtual network's write file operation. Send the raw packet
* to the network.
*
* Results:
* On success the count of bytes written else errno.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static int
VNetUserIfWrite(VNetPort *port, // IN
struct file *filp, // IN
const char *buf, // IN
size_t count) // IN
{
VNetUserIF *userIf = (VNetUserIF*)port->jack.private;
struct sk_buff *skb;

/*
* Check size
*/

if (count < sizeof (struct ethhdr)) {
return -EINVAL;
}

/*
* Required to enforce the downWhenAddrMismatch policy in the MAC
* layer. --hpreg
*/
if (!UP_AND_RUNNING(userIf->port.flags)) {
userIf->stats.droppedDown++;
return count;
}

/*
* Allocate an sk_buff.
*/

skb = dev_alloc_skb(count + 7);
if (skb == NULL) {
// XXX obey O_NONBLOCK?
return -ENOBUFS;
}

skb_reserve(skb, 2);

/*
* Copy the data and send it.
*/

userIf->stats.written++;
if (copy_from_user(skb_put(skb, count), buf, count)) {
dev_kfree_skb(skb);
return -EFAULT;
}

VNetSend(&userIf->port.jack, skb);

return count;
}


/*
*-----------------------------------------------------------------------------
*
* VNetUserIfIoctl --
*
* XXX
*
* Results:
* 0 on success
* -errno on failure
*
* Side effects:
* None
*
*-----------------------------------------------------------------------------
*/

static int
VNetUserIfIoctl(VNetPort *port, // IN
struct file *filp, // IN
unsigned int iocmd, // IN
unsigned long ioarg) // IN or OUT depending on iocmd
{
VNetUserIF *userIf = (VNetUserIF*)port->jack.private;

switch (iocmd) {
case OLD_SIOCSETNOTIFY:
iocmd = SIOCSETNOTIFY;
break;
case OLD_SIOCSETNOTIFY2:
iocmd = SIOCSETNOTIFY2;
break;
case OLD_SIOCUNSETNOTIFY:
iocmd = SIOCUNSETNOTIFY;
break;
case OLD_SIOCSETCLUSTERSIZE:
iocmd = SIOCSETCLUSTERSIZE;
break;
}

switch (iocmd) {
case SIOCSETNOTIFY:
{
int retval;
struct {
uintptr32 ptr;
uint32 mask;
} vn0;
VNet_Notify2 vn2;
if (copy_from_user(&vn0, (void *)ioarg, sizeof vn0)) {
return -EFAULT;
}
vn2.version = 1;
vn2.actPtr = vn0.ptr;
vn2.pollPtr = vn0.ptr;
vn2.actMask = vn0.mask;
vn2.pollMask = vn0.mask;
vn2.recvClusterPtr = 0;

retval = VNetUserIfSetupNotify(userIf, &vn2);
if (retval < 0) {
return retval;
}
userIf->pollMaskAct = 0;

userIf->clusterCount = 1;
break;
}

case SIOCSETNOTIFY2:
/*
* ORs pollMask into the integer pointed to by ptr if pending packet. Is
* cleared when all packets are drained.
*/
{
int retval;
VNet_NotifyH vnh;
VNet_Notify2 vn;

if (copy_from_user(&vnh, (void *)ioarg, sizeof vnh)) {
return -EFAULT;
}

switch (vnh.version) {
case 1:
{
VNet_Notify1 vn1;

if (copy_from_user(&vn1, (void *)ioarg, sizeof vn1)) {
return -EFAULT;
}
vn.version = 1;
vn.actPtr = vn1.actPtr;
vn.pollPtr = vn1.pollPtr;
vn.actMask = vn1.actMask;
vn.pollMask = vn1.pollMask;
vn.recvClusterPtr = 0;
break;
}
case 2:
if (copy_from_user(&vn, (void *)ioarg, sizeof vn)) {
return -EFAULT;
}
break;
default:
return -EINVAL;
}

retval = VNetUserIfSetupNotify(userIf, &vn);
if (retval < 0) {
return retval;
}

userIf->clusterCount = 1;
break;
}
case SIOCUNSETNOTIFY:
if (!userIf->pollPtr) {
/* This should always happen on ESX. */
return -EINVAL;
}
VNetUserIfUnsetupNotify(userIf);
userIf->clusterCount = 0;
break;

case SIOCSETCLUSTERSIZE:
if (!userIf->pollPtr) {
/* This should always happen on ESX. */
return -EINVAL;
}
userIf->clusterCount = ioarg;

break;

default:
return -ENOIOCTLCMD;
break;
}

return 0;
}


/*
*----------------------------------------------------------------------
*
* VNetUserIfPoll --
*
* The virtual network's file poll operation.
*
* Results:
* Return POLLIN if success, else sleep and return 0.
* FIXME: Should not we always return POLLOUT?
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

static int
VNetUserIfPoll(VNetPort *port, // IN
struct file *filp, // IN
poll_table *wait) // IN
{
VNetUserIF *userIf = (VNetUserIF*)port->jack.private;

poll_wait(filp, &userIf->waitQueue, wait);
if (!skb_queue_empty(&userIf->packetQueue)) {
return POLLIN;
}

return 0;
}


/*
*----------------------------------------------------------------------
*
* VNetUserIf_Create --
*
* Create a user level port to the wonderful world of virtual
* networking.
*
* Results:
* Errno. Also returns an allocated port to connect to,
* NULL on error.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/

int
VNetUserIf_Create(VNetPort **ret) // OUT
{
VNetUserIF *userIf;
static unsigned id = 0;
int retval;

userIf = kmalloc(sizeof *userIf, GFP_USER);
if (!userIf) {
return -ENOMEM;
}

/*
* Initialize fields.
*/

userIf->port.id = id++;

userIf->port.jack.peer = NULL;
userIf->port.jack.numPorts = 1;
VNetSnprintf(userIf->port.jack.name, sizeof userIf->port.jack.name,
"userif%u", userIf->port.id);
userIf->port.jack.private = userIf;
userIf->port.jack.index = 0;
userIf->port.jack.procEntry = NULL;
userIf->port.jack.free = VNetUserIfFree;
userIf->port.jack.rcv = VNetUserIfReceive;
userIf->port.jack.cycleDetect = NULL;
userIf->port.jack.portsChanged = NULL;
userIf->port.jack.isBridged = NULL;
userIf->pollPtr = NULL;
userIf->actPtr = NULL;
userIf->pollPage = NULL;
userIf->actPage = NULL;
userIf->recvClusterPage = NULL;
userIf->recvClusterPtr = &userIf->clusterCount;
userIf->clusterCount = 0;
userIf->pollMask = userIf->pollMaskAct = userIf->actMask = 0;

/*
* Make proc entry for this jack.
*/

retval = VNetProc_MakeEntry(NULL, userIf->port.jack.name, S_IFREG,
&userIf->port.jack.procEntry);
if (retval) {
if (retval == -ENXIO) {
userIf->port.jack.procEntry = NULL;
} else {
kfree(userIf);
return retval;
}
} else {
userIf->port.jack.procEntry->read_proc = VNetUserIfProcRead;
userIf->port.jack.procEntry->data = userIf;
}

/*
* Rest of fields.
*/

userIf->port.flags = IFF_RUNNING;

memset(userIf->port.paddr, 0, sizeof userIf->port.paddr);
memset(userIf->port.ladrf, 0, sizeof userIf->port.ladrf);

VNet_MakeMACAddress(&userIf->port);

userIf->port.fileOpRead = VNetUserIfRead;
userIf->port.fileOpWrite = VNetUserIfWrite;
userIf->port.fileOpIoctl = VNetUserIfIoctl;
userIf->port.fileOpPoll = VNetUserIfPoll;

skb_queue_head_init(&(userIf->packetQueue));
init_waitqueue_head(&userIf->waitQueue);

memset(&userIf->stats, 0, sizeof userIf->stats);

*ret = (VNetPort*)userIf;
return 0;
}

<span class='smallblacktext'>[ Редактирование ]</span>
Наверх
Roman I Khimov
Понедельник 08.08.2005 23:09

ID пользователя #1
Зарегистрирован: Воскресенье 27.06.2004 12:37
Местонахождение: Санкт-Петербург
Сообщений: 601
Бросьте вы уже эту гадость. Берите QEMU. http://qemu.org/


Греби и улыбайся!
Наверх
Сайт
Dron
Вторник 09.08.2005 09:56


ID пользователя #13
Зарегистрирован: Понедельник 05.07.2004 11:16
Местонахождение: Москва
Сообщений: 651
qemu до vmware не дотягивает пока...
интерфейса нету никакого...
Насчет скорости - тут конечно трудно сказать, сравнивать надо.

Кстати Роман... я вот че-то не пойму как мне запустить qemu под виндой... че-то она упорно не хотит... (хотя этого ты наверное не знаешь тоже...)
[ Редактирование вторник 09.08.2005 10:47 ]

Одну из двух вечных российских проблем можно, в принципе, решить с помощью асфальтоукладчиков и катков. А вот с дорогами, конечно, будет труднее...

Андрей Валяев
Наверх
Сайт
Roman I Khimov
Вторник 09.08.2005 13:12

ID пользователя #1
Зарегистрирован: Воскресенье 27.06.2004 12:37
Местонахождение: Санкт-Петербург
Сообщений: 601
Насчет последнего не скажу ничего, ибо нафиг не нужно. Но насчет интерфейса скажу тоже самое - нафиг не нужно. Он прекрасно работает, жаль только, что ускоритель несвободный... Без ускорителя, правда, есть режим qemu-fast, но для него скорее всего придется собрать ядро с легкими танцами с бубном.


Греби и улыбайся!
Наверх
Сайт
 

Перейти:     Наверх

Транслировать сообщения этой темы: rss 0.92 Транслировать сообщения этой темы: rss 2.0 Транслировать сообщения этой темы: RDF
Powered by e107 Forum System

© OSRC.info, 2004-2010.
Авторские права на любые материалы, авторы которых явно указаны, принадлежат их авторам. По вопросам публикации таких материалов обращайтесь к авторам.
Авторские права на любые другие материалы принадлежат OSRC.info.
Сайт является помещением библиотеки. Копирование, сохранение на жестком диске или иной способ сохранения произведений осуществляются пользователями на свой риск.
При использовании материалов сайта ссылка на OSRC.info обязательна.