netmap: several typo fixes

No functional changes intended.

(cherry picked from commit 45c67e8f6b56b9744f01142747fadf291fe3fad2)
Vincenzo Maffione 2021-04-02 07:01:20 +00:00 committed by Franco Fichtner
parent 44a25ccdd0
commit 9ba5919b1b
17 changed files with 55 additions and 44 deletions

View File

@@ -114,7 +114,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
virtqueue_notify(vq);
/* Update hwcur depending on where we stopped. */
kring->nr_hwcur = nm_i; /* note we migth break early */
kring->nr_hwcur = nm_i; /* note we might break early */
}
/* Free used slots. We only consider our own used buffers, recognized

View File

@@ -193,7 +193,7 @@ ports attached to the switch)
* always attached to a bridge.
* Persistent VALE ports must must be created separately, and i
* then attached like normal NICs. The NIOCREGIF we are examining
* will find them only if they had previosly been created and
* will find them only if they had previously been created and
* attached (see VALE_CTL below).
*
* - netmap_pipe_adapter [netmap_pipe.c]
@@ -977,7 +977,7 @@ static void
netmap_mem_drop(struct netmap_adapter *na)
{
int last = netmap_mem_deref(na->nm_mem, na);
/* if the native allocator had been overrided on regif,
/* if the native allocator had been overridden on regif,
* restore it now and drop the temporary one
*/
if (last && na->nm_mem_prev) {
@@ -1057,7 +1057,7 @@ netmap_do_unregif(struct netmap_priv_d *priv)
}
}
/* possibily decrement counter of tx_si/rx_si users */
/* possibly decrement counter of tx_si/rx_si users */
netmap_unset_ringid(priv);
/* delete the nifp */
netmap_mem_if_delete(na, priv->np_nifp);
@@ -1139,7 +1139,7 @@ netmap_dtor(void *data)
* they will be forwarded to the hw TX rings, saving the application
* from doing the same task in user-space.
*
* Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD
* Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
* flag, or globally with the netmap_fwd sysctl.
*
* The transfer NIC --> host is relatively easy, just encapsulate
@@ -1603,7 +1603,7 @@ netmap_get_na(struct nmreq_header *hdr,
netmap_adapter_get(ret);
/*
* if the adapter supports the host rings and it is not alread open,
* if the adapter supports the host rings and it is not already open,
* try to set the number of host rings as requested by the user
*/
if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
@@ -2027,7 +2027,7 @@ netmap_krings_get(struct netmap_priv_d *priv)
priv->np_qlast[NR_RX]);
/* first round: check that all the requested rings
* are neither alread exclusively owned, nor we
* are neither already exclusively owned, nor we
* want exclusive ownership when they are already in use
*/
foreach_selected_ring(priv, t, i, kring) {
@@ -2502,7 +2502,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
}
/* Make a kernel-space copy of the user-space nr_body.
* For convenince, the nr_body pointer and the pointers
* For convenience, the nr_body pointer and the pointers
* in the options list will be replaced with their
* kernel-space counterparts. The original pointers are
* saved internally and later restored by nmreq_copyout
@@ -3092,7 +3092,7 @@ nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
* The list of options is copied and the pointers adjusted. The
* original pointers are saved before the option they belonged.
*
* The option table has an entry for every availabe option. Entries
* The option table has an entry for every available option. Entries
* for options that have not been passed contain NULL.
*
*/

View File

@@ -804,7 +804,7 @@ nm_bdg_ctl_polling_stop(struct netmap_adapter *na)
bps->configured = false;
nm_os_free(bps);
bna->na_polling_state = NULL;
/* reenable interrupts */
/* re-enable interrupts */
nma_intr_enable(bna->hwna, 1);
return 0;
}

View File

@@ -260,7 +260,7 @@ nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
#ifdef INET
uint16_t pseudolen = datalen + iph->protocol;
/* Compute and insert the pseudo-header cheksum. */
/* Compute and insert the pseudo-header checksum. */
*check = in_pseudo(iph->saddr, iph->daddr,
htobe16(pseudolen));
/* Compute the checksum on TCP/UDP header + payload
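As an aside on the code touched above: nm_os_csum_tcpudp_ipv4() folds the IPv4 pseudo-header (source and destination address, protocol, and TCP/UDP length) into the checksum field before the payload is summed. The stand-alone sketch below shows only that folding arithmetic; it is an illustration under my own assumptions, not the FreeBSD in_pseudo() helper, and the addresses in main() are made up.

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum of the IPv4 pseudo-header fields (illustrative only). */
static uint16_t
pseudo_hdr_csum(uint32_t saddr, uint32_t daddr, uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* source address */
	sum += (daddr >> 16) + (daddr & 0xffff);	/* destination address */
	sum += proto;					/* zero byte + protocol */
	sum += len;					/* TCP/UDP length */

	while (sum >> 16)				/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;	/* payload words are added on top of this */
}

int
main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP), length 40 */
	printf("pseudo-header sum: 0x%04x\n",
	    pseudo_hdr_csum(0xc0000201, 0xc0000202, 6, 40));
	return 0;
}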

View File

@@ -446,7 +446,7 @@ generic_mbuf_destructor(struct mbuf *m)
/*
* First, clear the event mbuf.
* In principle, the event 'm' should match the one stored
* on ring 'r'. However we check it explicitely to stay
* on ring 'r'. However we check it explicitly to stay
* safe against lower layers (qdisc, driver, etc.) changing
* MBUF_TXQ(m) under our feet. If the match is not found
* on 'r', we try to see if it belongs to some other ring.

View File

@@ -283,7 +283,7 @@ struct nm_bridge;
struct netmap_priv_d;
struct nm_bdg_args;
/* os-specific NM_SELINFO_T initialzation/destruction functions */
/* os-specific NM_SELINFO_T initialization/destruction functions */
int nm_os_selinfo_init(NM_SELINFO_T *, const char *name);
void nm_os_selinfo_uninit(NM_SELINFO_T *);
@@ -466,7 +466,7 @@ struct netmap_kring {
struct netmap_adapter *na;
/* the adapter that wants to be notified when this kring has
* new slots avaialable. This is usually the same as the above,
* new slots available. This is usually the same as the above,
* but wrappers may let it point to themselves
*/
struct netmap_adapter *notify_na;
@@ -660,7 +660,7 @@ struct nm_config_info {
/*
* default type for the magic field.
* May be overriden in glue code.
* May be overridden in glue code.
*/
#ifndef NM_OS_MAGIC
#define NM_OS_MAGIC uint32_t
@@ -1611,7 +1611,7 @@ extern int netmap_debug; /* for debugging */
#define netmap_debug (0)
#endif /* !CONFIG_NETMAP_DEBUG */
enum { /* debug flags */
NM_DEBUG_ON = 1, /* generic debug messsages */
NM_DEBUG_ON = 1, /* generic debug messages */
NM_DEBUG_HOST = 0x2, /* debug host stack */
NM_DEBUG_RXSYNC = 0x10, /* debug on rxsync/txsync */
NM_DEBUG_TXSYNC = 0x20,

View File

@@ -222,7 +222,7 @@ netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
if (!a->busy_wait) {
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
}
nm_prerr("txsync() failed");
@@ -267,7 +267,7 @@ netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
* go to sleep, waiting for a kick from the application when new
* new slots are ready for transmission.
*/
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@@ -356,7 +356,7 @@ netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
if (!a->busy_wait) {
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
}
nm_prerr("rxsync() failed");
@@ -402,7 +402,7 @@ netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
* go to sleep, waiting for a kick from the application when new receive
* slots are available.
*/
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@@ -1000,7 +1000,7 @@ netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
* space is available.
*/
if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
/* Reenable notifications. */
/* Re-enable notifications. */
atok->appl_need_kick = 1;
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@@ -1061,7 +1061,7 @@ netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
* completed.
*/
if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
/* Reenable notifications. */
/* Re-enable notifications. */
atok->appl_need_kick = 1;
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
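The recurring "Re-enable notifications" plus "Double check, with store-load memory barrier" sequence in this file is the standard defence against lost wakeups: first advertise that a kick is wanted, then re-read the producer state, so that work published in between is never missed. Below is a compact user-space sketch of that ordering; the names (ring_state, need_kick, try_to_sleep) are hypothetical and only the barrier logic mirrors the kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct ring_state {
	atomic_uint head;	/* advanced by the producer (application) */
	unsigned int seen;	/* last head value consumed by this loop */
	atomic_bool need_kick;	/* set when the consumer wants a notification */
};

/* Returns true if it is safe to sleep, false if new work showed up. */
static bool
try_to_sleep(struct ring_state *rs)
{
	/* Re-enable notifications. */
	atomic_store_explicit(&rs->need_kick, true, memory_order_relaxed);

	/* Store-load barrier: publish the flag before re-reading head. */
	atomic_thread_fence(memory_order_seq_cst);

	/* Double check: if slots arrived in the meantime, stay awake. */
	if (atomic_load_explicit(&rs->head, memory_order_acquire) != rs->seen) {
		atomic_store_explicit(&rs->need_kick, false, memory_order_relaxed);
		return false;
	}
	return true;
}

int
main(void)
{
	struct ring_state rs;

	atomic_init(&rs.head, 0);
	atomic_init(&rs.need_kick, false);
	rs.seen = 0;

	return try_to_sleep(&rs) ? 0 : 1;
}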

View File

@@ -164,7 +164,7 @@ struct netmap_mem_d {
u_int flags;
#define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */
#define NETMAP_MEM_HIDDEN 0x8 /* beeing prepared */
#define NETMAP_MEM_HIDDEN 0x8 /* being prepared */
int lasterr; /* last error for curr config */
int active; /* active users */
int refcount;
@@ -172,7 +172,7 @@ struct netmap_mem_d {
struct netmap_obj_pool pools[NETMAP_POOLS_NR];
nm_memid_t nm_id; /* allocator identifier */
int nm_grp; /* iommu groupd id */
int nm_grp; /* iommu group id */
/* list of all existing allocators, sorted by nm_id */
struct netmap_mem_d *prev, *next;
@@ -848,7 +848,7 @@ netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
*
* 2a - cycle all the objects in every pool, get the list
* of the physical address descriptors
* 2b - calculate the offset in the array of pages desciptor in the
* 2b - calculate the offset in the array of pages descriptor in the
* main MDL
* 2c - copy the descriptors of the object in the main MDL
*
@@ -1400,7 +1400,7 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
if (p->lut) {
/* if the lut is already there we assume that also all the
* clusters have already been allocated, possibily by somebody
* clusters have already been allocated, possibly by somebody
* else (e.g., extmem). In the latter case, the alloc_done flag
* will remain at zero, so that we will not attempt to
* deallocate the clusters by ourselves in
@@ -1904,7 +1904,7 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
u_int len, ndesc;
if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
/* uneeded, or already created by somebody else */
/* unneeded, or already created by somebody else */
if (netmap_debug & NM_DEBUG_MEM)
nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);

View File

@@ -57,7 +57,7 @@
* of the object, and from there locate the offset from the beginning
* of the region.
*
* The invididual allocators manage a pool of memory for objects of
* The individual allocators manage a pool of memory for objects of
* the same size.
* The pool is split into smaller clusters, whose size is a
* multiple of the page size. The cluster size is chosen
@@ -70,7 +70,7 @@
* Allocation scans the bitmap; this is done only on attach, so we are not
* too worried about performance
*
* For each allocator we can define (thorugh sysctl) the size and
* For each allocator we can define (through sysctl) the size and
* number of each object. Memory is allocated at the first use of a
* netmap file descriptor, and can be freed when all such descriptors
* have been released (including unmapping the memory).
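The allocator described above keeps a pool per object size, carves each pool into page-multiple clusters, and scans a free bitmap only at attach time, so the scan cost stays off the fast path. The toy below demonstrates just the bitmap-scan step; the names and sizes are invented and it is not the netmap_mem2 code.

#include <stdint.h>
#include <stdio.h>

#define POOL_OBJS	128
#define BITS_PER_WORD	32

static uint32_t freemap[POOL_OBJS / BITS_PER_WORD];	/* 1 bit per object, 1 = free */

/* Return the index of a free object, or -1 if the pool is exhausted. */
static int
pool_alloc(void)
{
	for (int i = 0; i < POOL_OBJS / BITS_PER_WORD; i++) {
		if (freemap[i] == 0)
			continue;			/* word fully allocated */
		int bit = __builtin_ctz(freemap[i]);	/* gcc/clang builtin */
		freemap[i] &= ~(1U << bit);		/* mark the object in use */
		return i * BITS_PER_WORD + bit;
	}
	return -1;
}

int
main(void)
{
	for (int i = 0; i < POOL_OBJS / BITS_PER_WORD; i++)
		freemap[i] = ~0U;			/* everything starts free */
	printf("first: %d, second: %d\n", pool_alloc(), pool_alloc());
	return 0;
}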

View File

@@ -490,7 +490,7 @@ netmap_monitor_stop(struct netmap_adapter *na)
/* drop the additional ref taken in netmap_monitor_add() */
netmap_adapter_put(zkring->zmon_list[t].prev->na);
}
/* orhpan the zmon list */
/* orphan the zmon list */
if (z->next != NULL)
z->next->zmon_list[t].prev = NULL;
z->next = NULL;
@@ -603,7 +603,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
mring = mkring->ring;
mlim = mkring->nkr_num_slots - 1;
/* get the relased slots (rel_slots) */
/* get the released slots (rel_slots) */
if (tx == NR_TX) {
beg = kring->nr_hwtail + 1;
error = kring->mon_sync(kring, flags);

View File

@@ -718,7 +718,7 @@ do { \
static __inline uint32_t
nm_vale_rthash(const uint8_t *addr)
{
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
b += addr[5] << 8;
b += addr[4];
@@ -1495,7 +1495,7 @@ nm_vi_destroy(const char *name)
goto err;
}
/* also make sure that nobody is using the inferface */
/* also make sure that nobody is using the interface */
if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
error = EBUSY;

View File

@@ -147,7 +147,7 @@
* netmap:foo*, or another registration should be done to open at least a
* NIC TX queue in netmap mode.
*
* + Netmap is not currently able to deal with intercepted trasmit mbufs which
* + Netmap is not currently able to deal with intercepted transmit mbufs which
* require offloadings like TSO, UFO, checksumming offloadings, etc. It is
* responsibility of the user to disable those offloadings (e.g. using
* ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
@@ -479,7 +479,7 @@ struct netmap_if {
/* Header common to all request options. */
struct nmreq_option {
/* Pointer ot the next option. */
/* Pointer to the next option. */
uint64_t nro_next;
/* Option type. */
uint32_t nro_reqtype;
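Options are chained onto the main request through nro_next, a pointer stored in a 64-bit integer so the layout is the same for 32-bit and 64-bit user space; the kernel later replaces those pointers with kernel-space copies, as the netmap.c hunk earlier in this commit notes. The sketch below only walks such a list in user space; the struct is a simplified stand-in for the two fields shown above, not the real struct nmreq_option.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in mirroring the first two fields of struct nmreq_option. */
struct opt {
	uint64_t nro_next;	/* pointer to the next option, stored as u64 */
	uint32_t nro_reqtype;	/* option type */
};

static void
walk_options(uint64_t head)
{
	for (struct opt *o = (struct opt *)(uintptr_t)head; o != NULL;
	    o = (struct opt *)(uintptr_t)o->nro_next)
		printf("option type %u\n", o->nro_reqtype);
}

int
main(void)
{
	struct opt second = { .nro_next = 0, .nro_reqtype = 2 };
	struct opt first = { .nro_next = (uint64_t)(uintptr_t)&second, .nro_reqtype = 1 };

	walk_options((uint64_t)(uintptr_t)&first);
	return 0;
}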

View File

@@ -55,7 +55,7 @@
* To compute the next index in a circular ring you can use
* i = nm_ring_next(ring, i);
*
* To ease porting apps from pcap to netmap we supply a few fuctions
* To ease porting apps from pcap to netmap we supply a few functions
* that can be called to open, close, read and write on netmap in a way
* similar to libpcap. Note that the read/write function depend on
* an ioctl()/select()/poll() being issued to refill rings or push
@@ -305,7 +305,7 @@ typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d);
* nm_open() opens a file descriptor, binds to a port and maps memory.
*
* ifname (netmap:foo or vale:foo) is the port name
* a suffix can indicate the follwing:
* a suffix can indicate the following:
* ^ bind the host (sw) ring pair
* * bind host and NIC ring pairs
* -NN bind individual NIC ring pair
@@ -682,7 +682,7 @@ nm_parse(const char *ifname, struct nm_desc *d, char *err)
nr_flags = NR_REG_PIPE_MASTER;
p_state = P_GETNUM;
break;
case '}': /* pipe (slave endoint) */
case '}': /* pipe (slave endpoint) */
nr_flags = NR_REG_PIPE_SLAVE;
p_state = P_GETNUM;
break;
@@ -970,6 +970,7 @@ nm_close(struct nm_desc *d)
return 0;
}
static int
nm_mmap(struct nm_desc *d, const struct nm_desc *parent)
{
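The suffix table above is consumed by nm_open(), which this header pairs with nm_close(), nm_nextpkt() and the other libpcap-style helpers (available when NETMAP_WITH_LIBS is defined). A minimal receive loop is sketched below; the interface name em0 is an arbitrary example and error handling is kept to a bare minimum.

#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

#include <poll.h>
#include <stdio.h>

int
main(void)
{
	/* Bind all NIC ring pairs of em0; append '^' to bind the host rings instead. */
	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
	if (d == NULL) {
		fprintf(stderr, "nm_open failed\n");
		return 1;
	}

	struct pollfd pfd = { .fd = NETMAP_FD(d), .events = POLLIN };
	for (int i = 0; i < 10; i++) {
		struct nm_pkthdr h;
		unsigned char *buf;

		poll(&pfd, 1, 1000);	/* sync/refill the RX rings */
		while ((buf = nm_nextpkt(d, &h)) != NULL)
			printf("received %u bytes (first byte 0x%02x)\n",
			    (unsigned)h.len, buf[0]);
	}
	nm_close(d);
	return 0;
}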

View File

@@ -27,6 +27,16 @@
* $FreeBSD$
*/
/*
* This program contains a suite of unit tests for the netmap control device.
*
* On FreeBSD, you can run these tests with Kyua once installed in the system:
* # kyua test -k /usr/tests/sys/netmap/Kyuafile
*
* On Linux, you can run them directly:
* # ./ctrl-api-test
*/
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>

View File

@@ -498,7 +498,7 @@ init_groups(void)
* when the need to drop arises, we roll it back to head.
*/
struct morefrag {
uint16_t last_flag; /* for intput rings */
uint16_t last_flag; /* for input rings */
uint32_t last_hash; /* for input rings */
uint32_t shadow_head; /* for output rings */
};

View File

@@ -3149,7 +3149,7 @@ main(int arc, char **argv)
if (g.virt_header) {
/* Set the virtio-net header length, since the user asked
* for it explicitely. */
* for it explicitly. */
set_vnet_hdr_len(&g);
} else {
/* Check whether the netmap port we opened requires us to send

View File

@@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 31, 2020
.Dd April 2, 2021
.Dt VALECTL 8
.Os
.Sh NAME
@@ -97,7 +97,7 @@ The name must be different from any other network interface
already present in the system.
.It Fl r Ar interface
Destroy the persistent VALE port with name
.Ar inteface .
.Ar interface .
.It Fl l Ar valeSSS:PPP
Show the internal bridge number and port number of the given switch port.
.It Fl p Ar valeSSS:PPP
@@ -150,7 +150,7 @@ Using this option you can let them share memory with other ports.
Pass 1 as
.Ar memid
to use the global memory region already shared by all
harware netmap ports.
hardware netmap ports.
.El
.Sh SEE ALSO
.Xr netmap 4 ,
@@ -158,6 +158,6 @@ harware netmap ports.
.Sh AUTHORS
.An -nosplit
.Nm
was written by
has been written by
.An Michio Honda
at NetApp.