Root/target/linux/generic/files/crypto/ocf/hifn/hifn7751.c

1/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
2
3/*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 * http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested: Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 * derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 *
42 *
43__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
44 */
45
46/*
47 * Driver for various Hifn encryption processors.
48 */
49#include <linux/version.h>
50#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
51#include <linux/config.h>
52#endif
53#include <linux/module.h>
54#include <linux/init.h>
55#include <linux/list.h>
56#include <linux/slab.h>
57#include <linux/wait.h>
58#include <linux/sched.h>
59#include <linux/pci.h>
60#include <linux/delay.h>
61#include <linux/interrupt.h>
62#include <linux/spinlock.h>
63#include <linux/random.h>
64#include <linux/skbuff.h>
65#include <asm/io.h>
66
67#include <cryptodev.h>
68#include <uio.h>
69#include <hifn/hifn7751reg.h>
70#include <hifn/hifn7751var.h>
71
72#if 1
73#define DPRINTF(a...) if (hifn_debug) { \
74                            printk("%s: ", sc ? \
75                                device_get_nameunit(sc->sc_dev) : "hifn"); \
76                            printk(a); \
77                        } else
78#else
79#define DPRINTF(a...)
80#endif
81
82static inline int
83pci_get_revid(struct pci_dev *dev)
84{
85    u8 rid = 0;
86    pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
87    return rid;
88}
89
90static struct hifn_stats hifnstats;
91
92#define debug hifn_debug
93int hifn_debug = 0;
94module_param(hifn_debug, int, 0644);
95MODULE_PARM_DESC(hifn_debug, "Enable debug");
96
97int hifn_maxbatch = 1;
98module_param(hifn_maxbatch, int, 0644);
99MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
100
101int hifn_cache_linesize = 0x10;
102module_param(hifn_cache_linesize, int, 0444);
103MODULE_PARM_DESC(hifn_cache_linesize, "PCI config cache line size");
104
105#ifdef MODULE_PARM
106char *hifn_pllconfig = NULL;
107MODULE_PARM(hifn_pllconfig, "s");
108#else
109char hifn_pllconfig[32]; /* This setting is RO after loading */
110module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
111#endif
112MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
113
114#ifdef HIFN_VULCANDEV
115#include <sys/conf.h>
116#include <sys/uio.h>
117
118static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
119#endif
120
121/*
122 * Prototypes and count for the pci_device structure
123 */
124static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
125static void hifn_remove(struct pci_dev *dev);
126
127static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
128static int hifn_freesession(device_t, u_int64_t);
129static int hifn_process(device_t, struct cryptop *, int);
130
131static device_method_t hifn_methods = {
132    /* crypto device methods */
133    DEVMETHOD(cryptodev_newsession, hifn_newsession),
134    DEVMETHOD(cryptodev_freesession,hifn_freesession),
135    DEVMETHOD(cryptodev_process, hifn_process),
136};
137
138static void hifn_reset_board(struct hifn_softc *, int);
139static void hifn_reset_puc(struct hifn_softc *);
140static void hifn_puc_wait(struct hifn_softc *);
141static int hifn_enable_crypto(struct hifn_softc *);
142static void hifn_set_retry(struct hifn_softc *sc);
143static void hifn_init_dma(struct hifn_softc *);
144static void hifn_init_pci_registers(struct hifn_softc *);
145static int hifn_sramsize(struct hifn_softc *);
146static int hifn_dramsize(struct hifn_softc *);
147static int hifn_ramtype(struct hifn_softc *);
148static void hifn_sessions(struct hifn_softc *);
149#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
150static irqreturn_t hifn_intr(int irq, void *arg);
151#else
152static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
153#endif
154static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
155static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
156static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
157static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
158static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
159static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
160static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
161static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
162static int hifn_init_pubrng(struct hifn_softc *);
163static void hifn_tick(unsigned long arg);
164static void hifn_abort(struct hifn_softc *);
165static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
166
167static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
168static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
169
170#ifdef CONFIG_OCF_RANDOMHARVEST
171static int hifn_read_random(void *arg, u_int32_t *buf, int len);
172#endif
173
174#define HIFN_MAX_CHIPS 8
175static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
176
177static __inline u_int32_t
178READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
179{
180    u_int32_t v = readl(sc->sc_bar0 + reg);
181    sc->sc_bar0_lastreg = (bus_size_t) -1;
182    return (v);
183}
184#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
185
186static __inline u_int32_t
187READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
188{
189    u_int32_t v = readl(sc->sc_bar1 + reg);
190    sc->sc_bar1_lastreg = (bus_size_t) -1;
191    return (v);
192}
193#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
194
195/*
196 * map in a given buffer (great on some arches :-)
197 */
198
199static int
200pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
201{
202    struct iovec *iov = uio->uio_iov;
203
204    DPRINTF("%s()\n", __FUNCTION__);
205
206    buf->mapsize = 0;
207    for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
208        buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
209                iov->iov_base, iov->iov_len,
210                PCI_DMA_BIDIRECTIONAL);
211        buf->segs[buf->nsegs].ds_len = iov->iov_len;
212        buf->mapsize += iov->iov_len;
213        iov++;
214        buf->nsegs++;
215    }
216    /* identify this buffer by the first segment */
217    buf->map = (void *) buf->segs[0].ds_addr;
218    return(0);
219}
220
221/*
222 * map in a given sk_buff
223 */
224
225static int
226pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
227{
228    int i;
229
230    DPRINTF("%s()\n", __FUNCTION__);
231
232    buf->mapsize = 0;
233
234    buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
235            skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
236    buf->segs[0].ds_len = skb_headlen(skb);
237    buf->mapsize += buf->segs[0].ds_len;
238
239    buf->nsegs = 1;
240
241    for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
242        buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
243        buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
244                page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
245                    skb_shinfo(skb)->frags[i].page_offset,
246                buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
247        buf->mapsize += buf->segs[buf->nsegs].ds_len;
248        buf->nsegs++;
249    }
250
251    /* identify this buffer by the first segment */
252    buf->map = (void *) buf->segs[0].ds_addr;
253    return(0);
254}
255
256/*
257 * map in a given contiguous buffer
258 */
259
260static int
261pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
262{
263    DPRINTF("%s()\n", __FUNCTION__);
264
265    buf->mapsize = 0;
266    buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
267            b, len, PCI_DMA_BIDIRECTIONAL);
268    buf->segs[0].ds_len = len;
269    buf->mapsize += buf->segs[0].ds_len;
270    buf->nsegs = 1;
271
272    /* identify this buffer by the first segment */
273    buf->map = (void *) buf->segs[0].ds_addr;
274    return(0);
275}
276
277#if 0 /* not needed at this time */
278static void
279pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
280{
281    int i;
282
283    DPRINTF("%s()\n", __FUNCTION__);
284    for (i = 0; i < buf->nsegs; i++)
285        pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
286                buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
287}
288#endif
289
290static void
291pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
292{
293    int i;
294    DPRINTF("%s()\n", __FUNCTION__);
295    for (i = 0; i < buf->nsegs; i++) {
296        pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
297                buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
298        buf->segs[i].ds_addr = 0;
299        buf->segs[i].ds_len = 0;
300    }
301    buf->nsegs = 0;
302    buf->mapsize = 0;
303    buf->map = 0;
304}
305
306static const char*
307hifn_partname(struct hifn_softc *sc)
308{
309    /* XXX sprintf numbers when not decoded */
310    switch (pci_get_vendor(sc->sc_pcidev)) {
311    case PCI_VENDOR_HIFN:
312        switch (pci_get_device(sc->sc_pcidev)) {
313        case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
314        case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
315        case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
316        case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
317        case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
318        case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
319        }
320        return "Hifn unknown-part";
321    case PCI_VENDOR_INVERTEX:
322        switch (pci_get_device(sc->sc_pcidev)) {
323        case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
324        }
325        return "Invertex unknown-part";
326    case PCI_VENDOR_NETSEC:
327        switch (pci_get_device(sc->sc_pcidev)) {
328        case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
329        }
330        return "NetSec unknown-part";
331    }
332    return "Unknown-vendor unknown-part";
333}
334
335static u_int
336checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
337{
338    struct hifn_softc *sc = pci_get_drvdata(dev);
339    if (v > max) {
340        device_printf(sc->sc_dev, "Warning, %s %u out of range, "
341            "using max %u\n", what, v, max);
342        v = max;
343    } else if (v < min) {
344        device_printf(sc->sc_dev, "Warning, %s %u out of range, "
345            "using min %u\n", what, v, min);
346        v = min;
347    }
348    return v;
349}
350
351/*
352 * Select PLL configuration for 795x parts. This is complicated in
353 * that we cannot determine the optimal parameters without user input.
354 * The reference clock is derived from an external clock through a
355 * multiplier. The external clock is either the host bus (i.e. PCI)
356 * or an external clock generator. When using the PCI bus we assume
357 * the clock is either 33 or 66 MHz; for an external source we cannot
358 * tell the speed.
359 *
360 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
361 * for an external source, followed by the frequency. We calculate
362 * the appropriate multiplier and PLL register contents accordingly.
363 * When no configuration is given we default to "pci66" since that
364 * always will allow the card to work. If a card is using the PCI
365 * bus clock and in a 33MHz slot then it will be operating at half
366 * speed until the correct information is provided.
367 *
368 * We use a default setting of "ext66" because according to Mike Ham
369 * of HiFn, almost every board in existence has an external crystal
370 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
371 * because PCI33 can have clocks from 0 to 33Mhz, and some have
372 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
373 */
374static void
375hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
376{
377    const char *pllspec = hifn_pllconfig;
378    u_int freq, mul, fl, fh;
379    u_int32_t pllconfig;
380    char *nxt;
381
382    if (pllspec == NULL)
383        pllspec = "ext66";
384    fl = 33, fh = 66;
385    pllconfig = 0;
386    if (strncmp(pllspec, "ext", 3) == 0) {
387        pllspec += 3;
388        pllconfig |= HIFN_PLL_REF_SEL;
389        switch (pci_get_device(dev)) {
390        case PCI_PRODUCT_HIFN_7955:
391        case PCI_PRODUCT_HIFN_7956:
392            fl = 20, fh = 100;
393            break;
394#ifdef notyet
395        case PCI_PRODUCT_HIFN_7954:
396            fl = 20, fh = 66;
397            break;
398#endif
399        }
400    } else if (strncmp(pllspec, "pci", 3) == 0)
401        pllspec += 3;
402    freq = strtoul(pllspec, &nxt, 10);
403    if (nxt == pllspec)
404        freq = 66;
405    else
406        freq = checkmaxmin(dev, "frequency", freq, fl, fh);
407    /*
408     * Calculate multiplier. We target a Fck of 266 MHz,
409     * allowing only even values, possibly rounded down.
410     * Multipliers > 8 must set the charge pump current.
411     */
412    mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
413    pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
414    if (mul > 8)
415        pllconfig |= HIFN_PLL_IS;
416    *pll = pllconfig;
417}
418
419/*
420 * Attach an interface that successfully probed.
421 */
422static int
423hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
424{
425    struct hifn_softc *sc = NULL;
426    char rbase;
427    u_int16_t ena, rev;
428    int rseg, rc;
429    unsigned long mem_start, mem_len;
430    static int num_chips = 0;
431
432    DPRINTF("%s()\n", __FUNCTION__);
433
434    if (pci_enable_device(dev) < 0)
435        return(-ENODEV);
436
437#ifdef HAVE_PCI_SET_MWI
438    if (pci_set_mwi(dev))
439        return(-ENODEV);
440#endif
441
442    if (!dev->irq) {
443        printk("hifn: found device with no IRQ assigned. check BIOS settings!");
444        pci_disable_device(dev);
445        return(-ENODEV);
446    }
447
448    sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
449    if (!sc)
450        return(-ENOMEM);
451    memset(sc, 0, sizeof(*sc));
452
453    softc_device_init(sc, "hifn", num_chips, hifn_methods);
454
455    sc->sc_pcidev = dev;
456    sc->sc_irq = -1;
457    sc->sc_cid = -1;
458    sc->sc_num = num_chips++;
459    if (sc->sc_num < HIFN_MAX_CHIPS)
460        hifn_chip_idx[sc->sc_num] = sc;
461
462    pci_set_drvdata(sc->sc_pcidev, sc);
463
464    spin_lock_init(&sc->sc_mtx);
465
466    /* XXX handle power management */
467
468    /*
469     * The 7951 and 795x have a random number generator and
470     * public key support; note this.
471     */
472    if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
473        (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
474         pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
475         pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
476        sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
477    /*
478     * The 7811 has a random number generator and
479     * we also note it's identity 'cuz of some quirks.
480     */
481    if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
482        pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
483        sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
484
485    /*
486     * The 795x parts support AES.
487     */
488    if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
489        (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
490         pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
491        sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
492        /*
493         * Select PLL configuration. This depends on the
494         * bus and board design and must be manually configured
495         * if the default setting is unacceptable.
496         */
497        hifn_getpllconfig(dev, &sc->sc_pllconfig);
498    }
499
500    /*
501     * Setup PCI resources. Note that we record the bus
502     * tag and handle for each register mapping, this is
503     * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
504     * and WRITE_REG_1 macros throughout the driver.
505     */
506    mem_start = pci_resource_start(sc->sc_pcidev, 0);
507    mem_len = pci_resource_len(sc->sc_pcidev, 0);
508    sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
509    if (!sc->sc_bar0) {
510        device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
511        goto fail;
512    }
513    sc->sc_bar0_lastreg = (bus_size_t) -1;
514
515    mem_start = pci_resource_start(sc->sc_pcidev, 1);
516    mem_len = pci_resource_len(sc->sc_pcidev, 1);
517    sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
518    if (!sc->sc_bar1) {
519        device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
520        goto fail;
521    }
522    sc->sc_bar1_lastreg = (bus_size_t) -1;
523
524    /* fix up the bus size */
525    if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
526        device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
527        goto fail;
528    }
529    if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
530        device_printf(sc->sc_dev,
531                "No usable consistent DMA configuration, aborting.\n");
532        goto fail;
533    }
534
535    hifn_set_retry(sc);
536
537    /*
538     * Setup the area where the Hifn DMA's descriptors
539     * and associated data structures.
540     */
541    sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
542            sizeof(*sc->sc_dma),
543            &sc->sc_dma_physaddr);
544    if (!sc->sc_dma) {
545        device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
546        goto fail;
547    }
548    bzero(sc->sc_dma, sizeof(*sc->sc_dma));
549
550    /*
551     * Reset the board and do the ``secret handshake''
552     * to enable the crypto support. Then complete the
553     * initialization procedure by setting up the interrupt
554     * and hooking in to the system crypto support so we'll
555     * get used for system services like the crypto device,
556     * IPsec, RNG device, etc.
557     */
558    hifn_reset_board(sc, 0);
559
560    if (hifn_enable_crypto(sc) != 0) {
561        device_printf(sc->sc_dev, "crypto enabling failed\n");
562        goto fail;
563    }
564    hifn_reset_puc(sc);
565
566    hifn_init_dma(sc);
567    hifn_init_pci_registers(sc);
568
569    pci_set_master(sc->sc_pcidev);
570
571    /* XXX can't dynamically determine ram type for 795x; force dram */
572    if (sc->sc_flags & HIFN_IS_7956)
573        sc->sc_drammodel = 1;
574    else if (hifn_ramtype(sc))
575        goto fail;
576
577    if (sc->sc_drammodel == 0)
578        hifn_sramsize(sc);
579    else
580        hifn_dramsize(sc);
581
582    /*
583     * Workaround for NetSec 7751 rev A: half ram size because two
584     * of the address lines were left floating
585     */
586    if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
587        pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
588        pci_get_revid(dev) == 0x61) /*XXX???*/
589        sc->sc_ramsize >>= 1;
590
591    /*
592     * Arrange the interrupt line.
593     */
594    rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
595    if (rc) {
596        device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
597        goto fail;
598    }
599    sc->sc_irq = dev->irq;
600
601    hifn_sessions(sc);
602
603    /*
604     * NB: Keep only the low 16 bits; this masks the chip id
605     * from the 7951.
606     */
607    rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
608
609    rseg = sc->sc_ramsize / 1024;
610    rbase = 'K';
611    if (sc->sc_ramsize >= (1024 * 1024)) {
612        rbase = 'M';
613        rseg /= 1024;
614    }
615    device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
616        hifn_partname(sc), rev,
617        rseg, rbase, sc->sc_drammodel ? 'd' : 's');
618    if (sc->sc_flags & HIFN_IS_7956)
619        printf(", pll=0x%x<%s clk, %ux mult>",
620            sc->sc_pllconfig,
621            sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
622            2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
623    printf("\n");
624
625    sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
626    if (sc->sc_cid < 0) {
627        device_printf(sc->sc_dev, "could not get crypto driver id\n");
628        goto fail;
629    }
630
631    WRITE_REG_0(sc, HIFN_0_PUCNFG,
632        READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
633    ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
634
635    switch (ena) {
636    case HIFN_PUSTAT_ENA_2:
637        crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
638        crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
639        if (sc->sc_flags & HIFN_HAS_AES)
640            crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
641        /*FALLTHROUGH*/
642    case HIFN_PUSTAT_ENA_1:
643        crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
644        crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
645        crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
646        crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
647        crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
648        break;
649    }
650
651    if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
652        hifn_init_pubrng(sc);
653
654    init_timer(&sc->sc_tickto);
655    sc->sc_tickto.function = hifn_tick;
656    sc->sc_tickto.data = (unsigned long) sc->sc_num;
657    mod_timer(&sc->sc_tickto, jiffies + HZ);
658
659    return (0);
660
661fail:
662    if (sc->sc_cid >= 0)
663        crypto_unregister_all(sc->sc_cid);
664    if (sc->sc_irq != -1)
665        free_irq(sc->sc_irq, sc);
666    if (sc->sc_dma) {
667        /* Turn off DMA polling */
668        WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
669            HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
670
671        pci_free_consistent(sc->sc_pcidev,
672                sizeof(*sc->sc_dma),
673                sc->sc_dma, sc->sc_dma_physaddr);
674    }
675    kfree(sc);
676    return (-ENXIO);
677}
678
679/*
680 * Detach an interface that successfully probed.
681 */
682static void
683hifn_remove(struct pci_dev *dev)
684{
685    struct hifn_softc *sc = pci_get_drvdata(dev);
686    unsigned long l_flags;
687
688    DPRINTF("%s()\n", __FUNCTION__);
689
690    KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
691
692    /* disable interrupts */
693    HIFN_LOCK(sc);
694    WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
695    HIFN_UNLOCK(sc);
696
697    /*XXX other resources */
698    del_timer_sync(&sc->sc_tickto);
699
700    /* Turn off DMA polling */
701    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
702        HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
703
704    crypto_unregister_all(sc->sc_cid);
705
706    free_irq(sc->sc_irq, sc);
707
708    pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
709                sc->sc_dma, sc->sc_dma_physaddr);
710}
711
712
713static int
714hifn_init_pubrng(struct hifn_softc *sc)
715{
716    int i;
717
718    DPRINTF("%s()\n", __FUNCTION__);
719
720    if ((sc->sc_flags & HIFN_IS_7811) == 0) {
721        /* Reset 7951 public key/rng engine */
722        WRITE_REG_1(sc, HIFN_1_PUB_RESET,
723            READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
724
725        for (i = 0; i < 100; i++) {
726            DELAY(1000);
727            if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
728                HIFN_PUBRST_RESET) == 0)
729                break;
730        }
731
732        if (i == 100) {
733            device_printf(sc->sc_dev, "public key init failed\n");
734            return (1);
735        }
736    }
737
738    /* Enable the rng, if available */
739#ifdef CONFIG_OCF_RANDOMHARVEST
740    if (sc->sc_flags & HIFN_HAS_RNG) {
741        if (sc->sc_flags & HIFN_IS_7811) {
742            u_int32_t r;
743            r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
744            if (r & HIFN_7811_RNGENA_ENA) {
745                r &= ~HIFN_7811_RNGENA_ENA;
746                WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
747            }
748            WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
749                HIFN_7811_RNGCFG_DEFL);
750            r |= HIFN_7811_RNGENA_ENA;
751            WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
752        } else
753            WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
754                READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
755                HIFN_RNGCFG_ENA);
756
757        sc->sc_rngfirst = 1;
758        crypto_rregister(sc->sc_cid, hifn_read_random, sc);
759    }
760#endif
761
762    /* Enable public key engine, if available */
763    if (sc->sc_flags & HIFN_HAS_PUBLIC) {
764        WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
765        sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
766        WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
767#ifdef HIFN_VULCANDEV
768        sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
769                    UID_ROOT, GID_WHEEL, 0666,
770                    "vulcanpk");
771        sc->sc_pkdev->si_drv1 = sc;
772#endif
773    }
774
775    return (0);
776}
777
778#ifdef CONFIG_OCF_RANDOMHARVEST
779static int
780hifn_read_random(void *arg, u_int32_t *buf, int len)
781{
782    struct hifn_softc *sc = (struct hifn_softc *) arg;
783    u_int32_t sts;
784    int i, rc = 0;
785
786    if (len <= 0)
787        return rc;
788
789    if (sc->sc_flags & HIFN_IS_7811) {
790        /* ONLY VALID ON 7811!!!! */
791        for (i = 0; i < 5; i++) {
792            sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
793            if (sts & HIFN_7811_RNGSTS_UFL) {
794                device_printf(sc->sc_dev,
795                          "RNG underflow: disabling\n");
796                /* DAVIDM perhaps return -1 */
797                break;
798            }
799            if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
800                break;
801
802            /*
803             * There are at least two words in the RNG FIFO
804             * at this point.
805             */
806            if (rc < len)
807                buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
808            if (rc < len)
809                buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
810        }
811    } else
812        buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
813
814    /* NB: discard first data read */
815    if (sc->sc_rngfirst) {
816        sc->sc_rngfirst = 0;
817        rc = 0;
818    }
819
820    return(rc);
821}
822#endif /* CONFIG_OCF_RANDOMHARVEST */
823
824static void
825hifn_puc_wait(struct hifn_softc *sc)
826{
827    int i;
828    int reg = HIFN_0_PUCTRL;
829
830    if (sc->sc_flags & HIFN_IS_7956) {
831        reg = HIFN_0_PUCTRL2;
832    }
833
834    for (i = 5000; i > 0; i--) {
835        DELAY(1);
836        if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
837            break;
838    }
839    if (!i)
840        device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
841                READ_REG_0(sc, HIFN_0_PUCTRL));
842}
843
844/*
845 * Reset the processing unit.
846 */
847static void
848hifn_reset_puc(struct hifn_softc *sc)
849{
850    /* Reset processing unit */
851    int reg = HIFN_0_PUCTRL;
852
853    if (sc->sc_flags & HIFN_IS_7956) {
854        reg = HIFN_0_PUCTRL2;
855    }
856    WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
857
858    hifn_puc_wait(sc);
859}
860
861/*
862 * Set the Retry and TRDY registers; note that we set them to
863 * zero because the 7811 locks up when forced to retry (section
864 * 3.6 of "Specification Update SU-0014-04". Not clear if we
865 * should do this for all Hifn parts, but it doesn't seem to hurt.
866 */
867static void
868hifn_set_retry(struct hifn_softc *sc)
869{
870    DPRINTF("%s()\n", __FUNCTION__);
871    /* NB: RETRY only responds to 8-bit reads/writes */
872    pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
873    pci_write_config_byte(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
874    /* piggy back the cache line setting here */
875    pci_write_config_byte(sc->sc_pcidev, PCI_CACHE_LINE_SIZE, hifn_cache_linesize);
876}
877
878/*
879 * Resets the board. Values in the regesters are left as is
880 * from the reset (i.e. initial values are assigned elsewhere).
881 */
882static void
883hifn_reset_board(struct hifn_softc *sc, int full)
884{
885    u_int32_t reg;
886
887    DPRINTF("%s()\n", __FUNCTION__);
888    /*
889     * Set polling in the DMA configuration register to zero. 0x7 avoids
890     * resetting the board and zeros out the other fields.
891     */
892    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
893        HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
894
895    /*
896     * Now that polling has been disabled, we have to wait 1 ms
897     * before resetting the board.
898     */
899    DELAY(1000);
900
901    /* Reset the DMA unit */
902    if (full) {
903        WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
904        DELAY(1000);
905    } else {
906        WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
907            HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
908        hifn_reset_puc(sc);
909    }
910
911    KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
912    bzero(sc->sc_dma, sizeof(*sc->sc_dma));
913
914    /* Bring dma unit out of reset */
915    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
916        HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
917
918    hifn_puc_wait(sc);
919    hifn_set_retry(sc);
920
921    if (sc->sc_flags & HIFN_IS_7811) {
922        for (reg = 0; reg < 1000; reg++) {
923            if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
924                HIFN_MIPSRST_CRAMINIT)
925                break;
926            DELAY(1000);
927        }
928        if (reg == 1000)
929            device_printf(sc->sc_dev, ": cram init timeout\n");
930    } else {
931      /* set up DMA configuration register #2 */
932      /* turn off all PK and BAR0 swaps */
933      WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
934              (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
935              (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
936              (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
937              (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
938    }
939}
940
941static u_int32_t
942hifn_next_signature(u_int32_t a, u_int cnt)
943{
944    int i;
945    u_int32_t v;
946
947    for (i = 0; i < cnt; i++) {
948
949        /* get the parity */
950        v = a & 0x80080125;
951        v ^= v >> 16;
952        v ^= v >> 8;
953        v ^= v >> 4;
954        v ^= v >> 2;
955        v ^= v >> 1;
956
957        a = (v & 1) ^ (a << 1);
958    }
959
960    return a;
961}
962
963
964/*
965 * Checks to see if crypto is already enabled. If crypto isn't enable,
966 * "hifn_enable_crypto" is called to enable it. The check is important,
967 * as enabling crypto twice will lock the board.
968 */
969static int
970hifn_enable_crypto(struct hifn_softc *sc)
971{
972    u_int32_t dmacfg, ramcfg, encl, addr, i;
973    char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
974                      0x00, 0x00, 0x00, 0x00 };
975
976    DPRINTF("%s()\n", __FUNCTION__);
977
978    ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
979    dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
980
981    /*
982     * The RAM config register's encrypt level bit needs to be set before
983     * every read performed on the encryption level register.
984     */
985    WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
986
987    encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
988
989    /*
990     * Make sure we don't re-unlock. Two unlocks kills chip until the
991     * next reboot.
992     */
993    if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
994#ifdef HIFN_DEBUG
995        if (hifn_debug)
996            device_printf(sc->sc_dev,
997                "Strong crypto already enabled!\n");
998#endif
999        goto report;
1000    }
1001
1002    if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1003#ifdef HIFN_DEBUG
1004        if (hifn_debug)
1005            device_printf(sc->sc_dev,
1006                  "Unknown encryption level 0x%x\n", encl);
1007#endif
1008        return 1;
1009    }
1010
1011    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1012        HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1013    DELAY(1000);
1014    addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1015    DELAY(1000);
1016    WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1017    DELAY(1000);
1018
1019    for (i = 0; i <= 12; i++) {
1020        addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1021        WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1022
1023        DELAY(1000);
1024    }
1025
1026    WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1027    encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1028
1029#ifdef HIFN_DEBUG
1030    if (hifn_debug) {
1031        if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1032            device_printf(sc->sc_dev, "Engine is permanently "
1033                "locked until next system reset!\n");
1034        else
1035            device_printf(sc->sc_dev, "Engine enabled "
1036                "successfully!\n");
1037    }
1038#endif
1039
1040report:
1041    WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1042    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1043
1044    switch (encl) {
1045    case HIFN_PUSTAT_ENA_1:
1046    case HIFN_PUSTAT_ENA_2:
1047        break;
1048    case HIFN_PUSTAT_ENA_0:
1049    default:
1050        device_printf(sc->sc_dev, "disabled\n");
1051        break;
1052    }
1053
1054    return 0;
1055}
1056
1057/*
1058 * Give initial values to the registers listed in the "Register Space"
1059 * section of the HIFN Software Development reference manual.
1060 */
1061static void
1062hifn_init_pci_registers(struct hifn_softc *sc)
1063{
1064    DPRINTF("%s()\n", __FUNCTION__);
1065
1066    /* write fixed values needed by the Initialization registers */
1067    WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1068    WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1069    WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1070
1071    /* write all 4 ring address registers */
1072    WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1073        offsetof(struct hifn_dma, cmdr[0]));
1074    WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1075        offsetof(struct hifn_dma, srcr[0]));
1076    WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1077        offsetof(struct hifn_dma, dstr[0]));
1078    WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1079        offsetof(struct hifn_dma, resr[0]));
1080
1081    DELAY(2000);
1082
1083    /* write status register */
1084    WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1085        HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1086        HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1087        HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1088        HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1089        HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1090        HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1091        HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1092        HIFN_DMACSR_S_WAIT |
1093        HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1094        HIFN_DMACSR_C_WAIT |
1095        HIFN_DMACSR_ENGINE |
1096        ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1097        HIFN_DMACSR_PUBDONE : 0) |
1098        ((sc->sc_flags & HIFN_IS_7811) ?
1099        HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1100
1101    sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1102    sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1103        HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1104        HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1105        ((sc->sc_flags & HIFN_IS_7811) ?
1106        HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1107    sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1108    WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1109
1110
1111    if (sc->sc_flags & HIFN_IS_7956) {
1112        u_int32_t pll;
1113
1114        WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1115            HIFN_PUCNFG_TCALLPHASES |
1116            HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1117
1118        /* turn off the clocks and insure bypass is set */
1119        pll = READ_REG_1(sc, HIFN_1_PLL);
1120        pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1121          | HIFN_PLL_BP | HIFN_PLL_MBSET;
1122        WRITE_REG_1(sc, HIFN_1_PLL, pll);
1123        DELAY(10*1000); /* 10ms */
1124
1125        /* change configuration */
1126        pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1127        WRITE_REG_1(sc, HIFN_1_PLL, pll);
1128        DELAY(10*1000); /* 10ms */
1129
1130        /* disable bypass */
1131        pll &= ~HIFN_PLL_BP;
1132        WRITE_REG_1(sc, HIFN_1_PLL, pll);
1133        /* enable clocks with new configuration */
1134        pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1135        WRITE_REG_1(sc, HIFN_1_PLL, pll);
1136    } else {
1137        WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1138            HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1139            HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1140            (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1141    }
1142
1143    WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1144    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1145        HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1146        ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1147        ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1148}
1149
1150/*
1151 * The maximum number of sessions supported by the card
1152 * is dependent on the amount of context ram, which
1153 * encryption algorithms are enabled, and how compression
1154 * is configured. This should be configured before this
1155 * routine is called.
1156 */
1157static void
1158hifn_sessions(struct hifn_softc *sc)
1159{
1160    u_int32_t pucnfg;
1161    int ctxsize;
1162
1163    DPRINTF("%s()\n", __FUNCTION__);
1164
1165    pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1166
1167    if (pucnfg & HIFN_PUCNFG_COMPSING) {
1168        if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1169            ctxsize = 128;
1170        else
1171            ctxsize = 512;
1172        /*
1173         * 7955/7956 has internal context memory of 32K
1174         */
1175        if (sc->sc_flags & HIFN_IS_7956)
1176            sc->sc_maxses = 32768 / ctxsize;
1177        else
1178            sc->sc_maxses = 1 +
1179                ((sc->sc_ramsize - 32768) / ctxsize);
1180    } else
1181        sc->sc_maxses = sc->sc_ramsize / 16384;
1182
1183    if (sc->sc_maxses > 2048)
1184        sc->sc_maxses = 2048;
1185}
1186
1187/*
1188 * Determine ram type (sram or dram). Board should be just out of a reset
1189 * state when this is called.
1190 */
1191static int
1192hifn_ramtype(struct hifn_softc *sc)
1193{
1194    u_int8_t data[8], dataexpect[8];
1195    int i;
1196
1197    for (i = 0; i < sizeof(data); i++)
1198        data[i] = dataexpect[i] = 0x55;
1199    if (hifn_writeramaddr(sc, 0, data))
1200        return (-1);
1201    if (hifn_readramaddr(sc, 0, data))
1202        return (-1);
1203    if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1204        sc->sc_drammodel = 1;
1205        return (0);
1206    }
1207
1208    for (i = 0; i < sizeof(data); i++)
1209        data[i] = dataexpect[i] = 0xaa;
1210    if (hifn_writeramaddr(sc, 0, data))
1211        return (-1);
1212    if (hifn_readramaddr(sc, 0, data))
1213        return (-1);
1214    if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1215        sc->sc_drammodel = 1;
1216        return (0);
1217    }
1218
1219    return (0);
1220}
1221
1222#define HIFN_SRAM_MAX (32 << 20)
1223#define HIFN_SRAM_STEP_SIZE 16384
1224#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1225
1226static int
1227hifn_sramsize(struct hifn_softc *sc)
1228{
1229    u_int32_t a;
1230    u_int8_t data[8];
1231    u_int8_t dataexpect[sizeof(data)];
1232    int32_t i;
1233
1234    for (i = 0; i < sizeof(data); i++)
1235        data[i] = dataexpect[i] = i ^ 0x5a;
1236
1237    for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1238        a = i * HIFN_SRAM_STEP_SIZE;
1239        bcopy(&i, data, sizeof(i));
1240        hifn_writeramaddr(sc, a, data);
1241    }
1242
1243    for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1244        a = i * HIFN_SRAM_STEP_SIZE;
1245        bcopy(&i, dataexpect, sizeof(i));
1246        if (hifn_readramaddr(sc, a, data) < 0)
1247            return (0);
1248        if (bcmp(data, dataexpect, sizeof(data)) != 0)
1249            return (0);
1250        sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1251    }
1252
1253    return (0);
1254}
1255
1256/*
1257 * XXX For dram boards, one should really try all of the
1258 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1259 * is already set up correctly.
1260 */
1261static int
1262hifn_dramsize(struct hifn_softc *sc)
1263{
1264    u_int32_t cnfg;
1265
1266    if (sc->sc_flags & HIFN_IS_7956) {
1267        /*
1268         * 7955/7956 have a fixed internal ram of only 32K.
1269         */
1270        sc->sc_ramsize = 32768;
1271    } else {
1272        cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1273            HIFN_PUCNFG_DRAMMASK;
1274        sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1275    }
1276    return (0);
1277}
1278
1279static void
1280hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1281{
1282    struct hifn_dma *dma = sc->sc_dma;
1283
1284    DPRINTF("%s()\n", __FUNCTION__);
1285
1286    if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1287        dma->cmdi = 0;
1288        dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
1289        wmb();
1290        dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
1291        HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1292            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1293    }
1294    *cmdp = dma->cmdi++;
1295    dma->cmdk = dma->cmdi;
1296
1297    if (dma->srci == HIFN_D_SRC_RSIZE) {
1298        dma->srci = 0;
1299        dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
1300        wmb();
1301        dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
1302        HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1303            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1304    }
1305    *srcp = dma->srci++;
1306    dma->srck = dma->srci;
1307
1308    if (dma->dsti == HIFN_D_DST_RSIZE) {
1309        dma->dsti = 0;
1310        dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
1311        wmb();
1312        dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
1313        HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1314            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1315    }
1316    *dstp = dma->dsti++;
1317    dma->dstk = dma->dsti;
1318
1319    if (dma->resi == HIFN_D_RES_RSIZE) {
1320        dma->resi = 0;
1321        dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
1322        wmb();
1323        dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
1324        HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1325            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1326    }
1327    *resp = dma->resi++;
1328    dma->resk = dma->resi;
1329}
1330
1331static int
1332hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1333{
1334    struct hifn_dma *dma = sc->sc_dma;
1335    hifn_base_command_t wc;
1336    const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1337    int r, cmdi, resi, srci, dsti;
1338
1339    DPRINTF("%s()\n", __FUNCTION__);
1340
1341    wc.masks = htole16(3 << 13);
1342    wc.session_num = htole16(addr >> 14);
1343    wc.total_source_count = htole16(8);
1344    wc.total_dest_count = htole16(addr & 0x3fff);
1345
1346    hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1347
1348    WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1349        HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1350        HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1351
1352    /* build write command */
1353    bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1354    *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1355    bcopy(data, &dma->test_src, sizeof(dma->test_src));
1356
1357    dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1358        + offsetof(struct hifn_dma, test_src));
1359    dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1360        + offsetof(struct hifn_dma, test_dst));
1361
1362    dma->cmdr[cmdi].l = htole32(16 | masks);
1363    dma->srcr[srci].l = htole32(8 | masks);
1364    dma->dstr[dsti].l = htole32(4 | masks);
1365    dma->resr[resi].l = htole32(4 | masks);
1366
1367    for (r = 10000; r >= 0; r--) {
1368        DELAY(10);
1369        if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1370            break;
1371    }
1372    if (r == 0) {
1373        device_printf(sc->sc_dev, "writeramaddr -- "
1374            "result[%d](addr %d) still valid\n", resi, addr);
1375        r = -1;
1376        return (-1);
1377    } else
1378        r = 0;
1379
1380    WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1381        HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1382        HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1383
1384    return (r);
1385}
1386
1387static int
1388hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1389{
1390    struct hifn_dma *dma = sc->sc_dma;
1391    hifn_base_command_t rc;
1392    const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1393    int r, cmdi, srci, dsti, resi;
1394
1395    DPRINTF("%s()\n", __FUNCTION__);
1396
1397    rc.masks = htole16(2 << 13);
1398    rc.session_num = htole16(addr >> 14);
1399    rc.total_source_count = htole16(addr & 0x3fff);
1400    rc.total_dest_count = htole16(8);
1401
1402    hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1403
1404    WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1405        HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1406        HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1407
1408    bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1409    *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1410
1411    dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1412        offsetof(struct hifn_dma, test_src));
1413    dma->test_src = 0;
1414    dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1415        offsetof(struct hifn_dma, test_dst));
1416    dma->test_dst = 0;
1417    dma->cmdr[cmdi].l = htole32(8 | masks);
1418    dma->srcr[srci].l = htole32(8 | masks);
1419    dma->dstr[dsti].l = htole32(8 | masks);
1420    dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1421
1422    for (r = 10000; r >= 0; r--) {
1423        DELAY(10);
1424        if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1425            break;
1426    }
1427    if (r == 0) {
1428        device_printf(sc->sc_dev, "readramaddr -- "
1429            "result[%d](addr %d) still valid\n", resi, addr);
1430        r = -1;
1431    } else {
1432        r = 0;
1433        bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1434    }
1435
1436    WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1437        HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1438        HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1439
1440    return (r);
1441}
1442
1443/*
1444 * Initialize the descriptor rings.
1445 */
1446static void
1447hifn_init_dma(struct hifn_softc *sc)
1448{
1449    struct hifn_dma *dma = sc->sc_dma;
1450    int i;
1451
1452    DPRINTF("%s()\n", __FUNCTION__);
1453
1454    hifn_set_retry(sc);
1455
1456    /* initialize static pointer values */
1457    for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1458        dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1459            offsetof(struct hifn_dma, command_bufs[i][0]));
1460    for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1461        dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1462            offsetof(struct hifn_dma, result_bufs[i][0]));
1463
1464    dma->cmdr[HIFN_D_CMD_RSIZE].p =
1465        htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1466    dma->srcr[HIFN_D_SRC_RSIZE].p =
1467        htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1468    dma->dstr[HIFN_D_DST_RSIZE].p =
1469        htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1470    dma->resr[HIFN_D_RES_RSIZE].p =
1471        htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1472
1473    dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1474    dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1475    dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1476}
1477
1478/*
1479 * Writes out the raw command buffer space. Returns the
1480 * command buffer size.
1481 */
1482static u_int
1483hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1484{
1485    struct hifn_softc *sc = NULL;
1486    u_int8_t *buf_pos;
1487    hifn_base_command_t *base_cmd;
1488    hifn_mac_command_t *mac_cmd;
1489    hifn_crypt_command_t *cry_cmd;
1490    int using_mac, using_crypt, len, ivlen;
1491    u_int32_t dlen, slen;
1492
1493    DPRINTF("%s()\n", __FUNCTION__);
1494
1495    buf_pos = buf;
1496    using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1497    using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1498
1499    base_cmd = (hifn_base_command_t *)buf_pos;
1500    base_cmd->masks = htole16(cmd->base_masks);
1501    slen = cmd->src_mapsize;
1502    if (cmd->sloplen)
1503        dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1504    else
1505        dlen = cmd->dst_mapsize;
1506    base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1507    base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1508    dlen >>= 16;
1509    slen >>= 16;
1510    base_cmd->session_num = htole16(
1511        ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1512        ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1513    buf_pos += sizeof(hifn_base_command_t);
1514
1515    if (using_mac) {
1516        mac_cmd = (hifn_mac_command_t *)buf_pos;
1517        dlen = cmd->maccrd->crd_len;
1518        mac_cmd->source_count = htole16(dlen & 0xffff);
1519        dlen >>= 16;
1520        mac_cmd->masks = htole16(cmd->mac_masks |
1521            ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1522        mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1523        mac_cmd->reserved = 0;
1524        buf_pos += sizeof(hifn_mac_command_t);
1525    }
1526
1527    if (using_crypt) {
1528        cry_cmd = (hifn_crypt_command_t *)buf_pos;
1529        dlen = cmd->enccrd->crd_len;
1530        cry_cmd->source_count = htole16(dlen & 0xffff);
1531        dlen >>= 16;
1532        cry_cmd->masks = htole16(cmd->cry_masks |
1533            ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1534        cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1535        cry_cmd->reserved = 0;
1536        buf_pos += sizeof(hifn_crypt_command_t);
1537    }
1538
1539    if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1540        bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1541        buf_pos += HIFN_MAC_KEY_LENGTH;
1542    }
1543
1544    if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1545        switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1546        case HIFN_CRYPT_CMD_ALG_3DES:
1547            bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1548            buf_pos += HIFN_3DES_KEY_LENGTH;
1549            break;
1550        case HIFN_CRYPT_CMD_ALG_DES:
1551            bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1552            buf_pos += HIFN_DES_KEY_LENGTH;
1553            break;
1554        case HIFN_CRYPT_CMD_ALG_RC4:
1555            len = 256;
1556            do {
1557                int clen;
1558
1559                clen = MIN(cmd->cklen, len);
1560                bcopy(cmd->ck, buf_pos, clen);
1561                len -= clen;
1562                buf_pos += clen;
1563            } while (len > 0);
1564            bzero(buf_pos, 4);
1565            buf_pos += 4;
1566            break;
1567        case HIFN_CRYPT_CMD_ALG_AES:
1568            /*
1569             * AES keys are variable 128, 192 and
1570             * 256 bits (16, 24 and 32 bytes).
1571             */
1572            bcopy(cmd->ck, buf_pos, cmd->cklen);
1573            buf_pos += cmd->cklen;
1574            break;
1575        }
1576    }
1577
1578    if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1579        switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1580        case HIFN_CRYPT_CMD_ALG_AES:
1581            ivlen = HIFN_AES_IV_LENGTH;
1582            break;
1583        default:
1584            ivlen = HIFN_IV_LENGTH;
1585            break;
1586        }
1587        bcopy(cmd->iv, buf_pos, ivlen);
1588        buf_pos += ivlen;
1589    }
1590
1591    if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1592        bzero(buf_pos, 8);
1593        buf_pos += 8;
1594    }
1595
1596    return (buf_pos - buf);
1597}
1598
1599static int
1600hifn_dmamap_aligned(struct hifn_operand *op)
1601{
1602    struct hifn_softc *sc = NULL;
1603    int i;
1604
1605    DPRINTF("%s()\n", __FUNCTION__);
1606
1607    for (i = 0; i < op->nsegs; i++) {
1608        if (op->segs[i].ds_addr & 3)
1609            return (0);
1610        if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1611            return (0);
1612    }
1613    return (1);
1614}
1615
1616static __inline int
1617hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1618{
1619    struct hifn_dma *dma = sc->sc_dma;
1620
1621    if (++idx == HIFN_D_DST_RSIZE) {
1622        dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1623            HIFN_D_MASKDONEIRQ);
1624        HIFN_DSTR_SYNC(sc, idx,
1625            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1626        idx = 0;
1627    }
1628    return (idx);
1629}
1630
1631static int
1632hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1633{
1634    struct hifn_dma *dma = sc->sc_dma;
1635    struct hifn_operand *dst = &cmd->dst;
1636    u_int32_t p, l;
1637    int idx, used = 0, i;
1638
1639    DPRINTF("%s()\n", __FUNCTION__);
1640
1641    idx = dma->dsti;
1642    for (i = 0; i < dst->nsegs - 1; i++) {
1643        dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1644        dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1645        wmb();
1646        dma->dstr[idx].l |= htole32(HIFN_D_VALID);
1647        HIFN_DSTR_SYNC(sc, idx,
1648            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1649        used++;
1650
1651        idx = hifn_dmamap_dstwrap(sc, idx);
1652    }
1653
1654    if (cmd->sloplen == 0) {
1655        p = dst->segs[i].ds_addr;
1656        l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1657            dst->segs[i].ds_len;
1658    } else {
1659        p = sc->sc_dma_physaddr +
1660            offsetof(struct hifn_dma, slop[cmd->slopidx]);
1661        l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1662            sizeof(u_int32_t);
1663
1664        if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1665            dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1666            dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
1667                (dst->segs[i].ds_len - cmd->sloplen));
1668            wmb();
1669            dma->dstr[idx].l |= htole32(HIFN_D_VALID);
1670            HIFN_DSTR_SYNC(sc, idx,
1671                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1672            used++;
1673
1674            idx = hifn_dmamap_dstwrap(sc, idx);
1675        }
1676    }
1677    dma->dstr[idx].p = htole32(p);
1678    dma->dstr[idx].l = htole32(l);
1679    wmb();
1680    dma->dstr[idx].l |= htole32(HIFN_D_VALID);
1681    HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1682    used++;
1683
1684    idx = hifn_dmamap_dstwrap(sc, idx);
1685
1686    dma->dsti = idx;
1687    dma->dstu += used;
1688    return (idx);
1689}
1690
1691static __inline int
1692hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1693{
1694    struct hifn_dma *dma = sc->sc_dma;
1695
1696    if (++idx == HIFN_D_SRC_RSIZE) {
1697        dma->srcr[idx].l = htole32(HIFN_D_VALID |
1698            HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1699        HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1700            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1701        idx = 0;
1702    }
1703    return (idx);
1704}
1705
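/*
 * Source-ring counterpart of hifn_dmamap_load_dst(): one descriptor
 * per segment, DONE interrupt masked, HIFN_D_LAST set on the final
 * segment.
 */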
1706static int
1707hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1708{
1709    struct hifn_dma *dma = sc->sc_dma;
1710    struct hifn_operand *src = &cmd->src;
1711    int idx, i;
1712    u_int32_t last = 0;
1713
1714    DPRINTF("%s()\n", __FUNCTION__);
1715
1716    idx = dma->srci;
1717    for (i = 0; i < src->nsegs; i++) {
1718        if (i == src->nsegs - 1)
1719            last = HIFN_D_LAST;
1720
1721        dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1722        dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1723            HIFN_D_MASKDONEIRQ | last);
1724        wmb();
1725        dma->srcr[idx].l |= htole32(HIFN_D_VALID);
1726        HIFN_SRCR_SYNC(sc, idx,
1727            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1728
1729        idx = hifn_dmamap_srcwrap(sc, idx);
1730    }
1731    dma->srci = idx;
1732    dma->srcu += src->nsegs;
1733    return (idx);
1734}
1735
1736
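/*
 * Queue one operation on the chip: check for command/result ring
 * space, map the source buffer (the destination shares the mapping
 * when the source is suitably aligned), write the command descriptor,
 * reserve a result slot, load the source and destination rings and
 * enable any idle DMA engines.  Returns 0 on success, ERESTART when
 * the rings are exhausted so the caller can requeue, or an errno on
 * mapping failure.
 */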
1737static int
1738hifn_crypto(
1739    struct hifn_softc *sc,
1740    struct hifn_command *cmd,
1741    struct cryptop *crp,
1742    int hint)
1743{
1744    struct hifn_dma *dma = sc->sc_dma;
1745    u_int32_t cmdlen, csr;
1746    int cmdi, resi, err = 0;
1747    unsigned long l_flags;
1748
1749    DPRINTF("%s()\n", __FUNCTION__);
1750
1751    /*
1752     * need 1 cmd, and 1 res
1753     *
1754     * NB: check this first since it's easy.
1755     */
1756    HIFN_LOCK(sc);
1757    if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1758        (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1759#ifdef HIFN_DEBUG
1760        if (hifn_debug) {
1761            device_printf(sc->sc_dev,
1762                "cmd/result exhaustion, cmdu %u resu %u\n",
1763                dma->cmdu, dma->resu);
1764        }
1765#endif
1766        hifnstats.hst_nomem_cr++;
1767        sc->sc_needwakeup |= CRYPTO_SYMQ;
1768        HIFN_UNLOCK(sc);
1769        return (ERESTART);
1770    }
1771
1772    if (crp->crp_flags & CRYPTO_F_SKBUF) {
1773        if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
1774            hifnstats.hst_nomem_load++;
1775            err = ENOMEM;
1776            goto err_srcmap1;
1777        }
1778    } else if (crp->crp_flags & CRYPTO_F_IOV) {
1779        if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
1780            hifnstats.hst_nomem_load++;
1781            err = ENOMEM;
1782            goto err_srcmap1;
1783        }
1784    } else {
1785        if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
1786            hifnstats.hst_nomem_load++;
1787            err = ENOMEM;
1788            goto err_srcmap1;
1789        }
1790    }
1791
1792    if (hifn_dmamap_aligned(&cmd->src)) {
1793        cmd->sloplen = cmd->src_mapsize & 3;
1794        cmd->dst = cmd->src;
1795    } else {
1796        if (crp->crp_flags & CRYPTO_F_IOV) {
1797            DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
1798            err = EINVAL;
1799            goto err_srcmap;
1800        } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
1801#ifdef NOTYET
1802            int totlen, len;
1803            struct mbuf *m, *m0, *mlast;
1804
1805            KASSERT(cmd->dst_m == cmd->src_m,
1806                ("hifn_crypto: dst_m initialized improperly"));
1807            hifnstats.hst_unaligned++;
1808            /*
1809             * Source is not aligned on a longword boundary.
1810             * Copy the data to ensure alignment. If we fail
1811             * to allocate mbufs or clusters while doing this
1812             * we return ERESTART so the operation is requeued
1813             * at the crypto layer, but only if there are
1814             * ops already posted to the hardware; otherwise we
1815             * have no guarantee that we'll be re-entered.
1816             */
1817            totlen = cmd->src_mapsize;
1818            if (cmd->src_m->m_flags & M_PKTHDR) {
1819                len = MHLEN;
1820                MGETHDR(m0, M_DONTWAIT, MT_DATA);
1821                if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1822                    m_free(m0);
1823                    m0 = NULL;
1824                }
1825            } else {
1826                len = MLEN;
1827                MGET(m0, M_DONTWAIT, MT_DATA);
1828            }
1829            if (m0 == NULL) {
1830                hifnstats.hst_nomem_mbuf++;
1831                err = dma->cmdu ? ERESTART : ENOMEM;
1832                goto err_srcmap;
1833            }
1834            if (totlen >= MINCLSIZE) {
1835                MCLGET(m0, M_DONTWAIT);
1836                if ((m0->m_flags & M_EXT) == 0) {
1837                    hifnstats.hst_nomem_mcl++;
1838                    err = dma->cmdu ? ERESTART : ENOMEM;
1839                    m_freem(m0);
1840                    goto err_srcmap;
1841                }
1842                len = MCLBYTES;
1843            }
1844            totlen -= len;
1845            m0->m_pkthdr.len = m0->m_len = len;
1846            mlast = m0;
1847
1848            while (totlen > 0) {
1849                MGET(m, M_DONTWAIT, MT_DATA);
1850                if (m == NULL) {
1851                    hifnstats.hst_nomem_mbuf++;
1852                    err = dma->cmdu ? ERESTART : ENOMEM;
1853                    m_freem(m0);
1854                    goto err_srcmap;
1855                }
1856                len = MLEN;
1857                if (totlen >= MINCLSIZE) {
1858                    MCLGET(m, M_DONTWAIT);
1859                    if ((m->m_flags & M_EXT) == 0) {
1860                        hifnstats.hst_nomem_mcl++;
1861                        err = dma->cmdu ? ERESTART : ENOMEM;
1862                        mlast->m_next = m;
1863                        m_freem(m0);
1864                        goto err_srcmap;
1865                    }
1866                    len = MCLBYTES;
1867                }
1868
1869                m->m_len = len;
1870                m0->m_pkthdr.len += len;
1871                totlen -= len;
1872
1873                mlast->m_next = m;
1874                mlast = m;
1875            }
1876            cmd->dst_m = m0;
1877#else
1878            device_printf(sc->sc_dev,
1879                    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
1880                    __FILE__, __LINE__);
1881            err = EINVAL;
1882            goto err_srcmap;
1883#endif
1884        } else {
1885            device_printf(sc->sc_dev,
1886                    "%s,%d: unaligned contig buffers not implemented\n",
1887                    __FILE__, __LINE__);
1888            err = EINVAL;
1889            goto err_srcmap;
1890        }
1891    }
1892
1893    if (cmd->dst_map == NULL) {
1894        if (crp->crp_flags & CRYPTO_F_SKBUF) {
1895            if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
1896                hifnstats.hst_nomem_map++;
1897                err = ENOMEM;
1898                goto err_dstmap1;
1899            }
1900        } else if (crp->crp_flags & CRYPTO_F_IOV) {
1901            if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
1902                hifnstats.hst_nomem_load++;
1903                err = ENOMEM;
1904                goto err_dstmap1;
1905            }
1906        } else {
1907            if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
1908                hifnstats.hst_nomem_load++;
1909                err = ENOMEM;
1910                goto err_dstmap1;
1911            }
1912        }
1913    }
1914
1915#ifdef HIFN_DEBUG
1916    if (hifn_debug) {
1917        device_printf(sc->sc_dev,
1918            "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1919            READ_REG_1(sc, HIFN_1_DMA_CSR),
1920            READ_REG_1(sc, HIFN_1_DMA_IER),
1921            dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1922            cmd->src_nsegs, cmd->dst_nsegs);
1923    }
1924#endif
1925
1926#if 0
1927    if (cmd->src_map == cmd->dst_map) {
1928        bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1929            BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1930    } else {
1931        bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1932            BUS_DMASYNC_PREWRITE);
1933        bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1934            BUS_DMASYNC_PREREAD);
1935    }
1936#endif
1937
1938    /*
1939     * need N src, and N dst
1940     */
1941    if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1942        (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1943#ifdef HIFN_DEBUG
1944        if (hifn_debug) {
1945            device_printf(sc->sc_dev,
1946                "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1947                dma->srcu, cmd->src_nsegs,
1948                dma->dstu, cmd->dst_nsegs);
1949        }
1950#endif
1951        hifnstats.hst_nomem_sd++;
1952        err = ERESTART;
1953        goto err_dstmap;
1954    }
1955
1956    if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1957        dma->cmdi = 0;
1958        dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
1959        wmb();
1960        dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
1961        HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1962            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1963    }
1964    cmdi = dma->cmdi++;
1965    cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
1966    HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
1967
1968    /* .p for command/result already set */
1969    dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
1970        HIFN_D_MASKDONEIRQ);
1971    wmb();
1972    dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
1973    HIFN_CMDR_SYNC(sc, cmdi,
1974        BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1975    dma->cmdu++;
1976
1977    /*
1978     * We don't worry about missing an interrupt (which a "command wait"
1979     * interrupt salvages us from), unless there is more than one command
1980     * in the queue.
1981     */
1982    if (dma->cmdu > 1) {
1983        sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
1984        WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1985    }
1986
1987    hifnstats.hst_ipackets++;
1988    hifnstats.hst_ibytes += cmd->src_mapsize;
1989
1990    hifn_dmamap_load_src(sc, cmd);
1991
1992    /*
1993     * Unlike other descriptors, we don't mask done interrupt from
1994     * result descriptor.
1995     */
1996#ifdef HIFN_DEBUG
1997    if (hifn_debug)
1998        device_printf(sc->sc_dev, "load res\n");
1999#endif
2000    if (dma->resi == HIFN_D_RES_RSIZE) {
2001        dma->resi = 0;
2002        dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
2003        wmb();
2004        dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
2005        HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2006            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2007    }
2008    resi = dma->resi++;
2009    KASSERT(dma->hifn_commands[resi] == NULL,
2010        ("hifn_crypto: command slot %u busy", resi));
2011    dma->hifn_commands[resi] = cmd;
2012    HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2013    if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2014        dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2015            HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2016        wmb();
2017        dma->resr[resi].l |= htole32(HIFN_D_VALID);
2018        sc->sc_curbatch++;
2019        if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2020            hifnstats.hst_maxbatch = sc->sc_curbatch;
2021        hifnstats.hst_totbatch++;
2022    } else {
2023        dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
2024        wmb();
2025        dma->resr[resi].l |= htole32(HIFN_D_VALID);
2026        sc->sc_curbatch = 0;
2027    }
2028    HIFN_RESR_SYNC(sc, resi,
2029        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2030    dma->resu++;
2031
2032    if (cmd->sloplen)
2033        cmd->slopidx = resi;
2034
2035    hifn_dmamap_load_dst(sc, cmd);
2036
2037    csr = 0;
2038    if (sc->sc_c_busy == 0) {
2039        csr |= HIFN_DMACSR_C_CTRL_ENA;
2040        sc->sc_c_busy = 1;
2041    }
2042    if (sc->sc_s_busy == 0) {
2043        csr |= HIFN_DMACSR_S_CTRL_ENA;
2044        sc->sc_s_busy = 1;
2045    }
2046    if (sc->sc_r_busy == 0) {
2047        csr |= HIFN_DMACSR_R_CTRL_ENA;
2048        sc->sc_r_busy = 1;
2049    }
2050    if (sc->sc_d_busy == 0) {
2051        csr |= HIFN_DMACSR_D_CTRL_ENA;
2052        sc->sc_d_busy = 1;
2053    }
2054    if (csr)
2055        WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2056
2057#ifdef HIFN_DEBUG
2058    if (hifn_debug) {
2059        device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2060            READ_REG_1(sc, HIFN_1_DMA_CSR),
2061            READ_REG_1(sc, HIFN_1_DMA_IER));
2062    }
2063#endif
2064
2065    sc->sc_active = 5;
2066    HIFN_UNLOCK(sc);
2067    KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2068    return (err); /* success */
2069
2070err_dstmap:
2071    if (cmd->src_map != cmd->dst_map)
2072        pci_unmap_buf(sc, &cmd->dst);
2073err_dstmap1:
2074err_srcmap:
2075    if (crp->crp_flags & CRYPTO_F_SKBUF) {
2076        if (cmd->src_skb != cmd->dst_skb)
2077#ifdef NOTYET
2078            m_freem(cmd->dst_m);
2079#else
2080            device_printf(sc->sc_dev,
2081                    "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2082                    __FILE__, __LINE__);
2083#endif
2084    }
2085    pci_unmap_buf(sc, &cmd->src);
2086err_srcmap1:
2087    HIFN_UNLOCK(sc);
2088    return (err);
2089}
2090
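/*
 * One-second watchdog timer.  hifn_crypto() arms sc_active each time
 * it queues work; once the count reaches zero this routine disables
 * whichever DMA engines have drained their rings, then re-arms itself.
 */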
2091static void
2092hifn_tick(unsigned long arg)
2093{
2094    struct hifn_softc *sc;
2095    unsigned long l_flags;
2096
2097    if (arg >= HIFN_MAX_CHIPS)
2098        return;
2099    sc = hifn_chip_idx[arg];
2100    if (!sc)
2101        return;
2102
2103    HIFN_LOCK(sc);
2104    if (sc->sc_active == 0) {
2105        struct hifn_dma *dma = sc->sc_dma;
2106        u_int32_t r = 0;
2107
2108        if (dma->cmdu == 0 && sc->sc_c_busy) {
2109            sc->sc_c_busy = 0;
2110            r |= HIFN_DMACSR_C_CTRL_DIS;
2111        }
2112        if (dma->srcu == 0 && sc->sc_s_busy) {
2113            sc->sc_s_busy = 0;
2114            r |= HIFN_DMACSR_S_CTRL_DIS;
2115        }
2116        if (dma->dstu == 0 && sc->sc_d_busy) {
2117            sc->sc_d_busy = 0;
2118            r |= HIFN_DMACSR_D_CTRL_DIS;
2119        }
2120        if (dma->resu == 0 && sc->sc_r_busy) {
2121            sc->sc_r_busy = 0;
2122            r |= HIFN_DMACSR_R_CTRL_DIS;
2123        }
2124        if (r)
2125            WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2126    } else
2127        sc->sc_active--;
2128    HIFN_UNLOCK(sc);
2129    mod_timer(&sc->sc_tickto, jiffies + HZ);
2130}
2131
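/*
 * Interrupt handler: acknowledge the DMA status bits we own, reset the
 * part on abort conditions, then reap completed result, source and
 * command descriptors, invoking hifn_callback() for each finished
 * operation and unblocking the crypto core if it was throttled.
 */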
2132static irqreturn_t
2133#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
2134hifn_intr(int irq, void *arg)
2135#else
2136hifn_intr(int irq, void *arg, struct pt_regs *regs)
2137#endif
2138{
2139    struct hifn_softc *sc = arg;
2140    struct hifn_dma *dma;
2141    u_int32_t dmacsr, restart;
2142    int i, u;
2143    unsigned long l_flags;
2144
2145    dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2146
2147    /* Nothing in the DMA unit interrupted */
2148    if ((dmacsr & sc->sc_dmaier) == 0)
2149        return IRQ_NONE;
2150
2151    HIFN_LOCK(sc);
2152
2153    dma = sc->sc_dma;
2154
2155#ifdef HIFN_DEBUG
2156    if (hifn_debug) {
2157        device_printf(sc->sc_dev,
2158            "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2159            dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2160            dma->cmdi, dma->srci, dma->dsti, dma->resi,
2161            dma->cmdk, dma->srck, dma->dstk, dma->resk,
2162            dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2163    }
2164#endif
2165
2166    WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2167
2168    if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2169        (dmacsr & HIFN_DMACSR_PUBDONE))
2170        WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2171            READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2172
2173    restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2174    if (restart)
2175        device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2176
2177    if (sc->sc_flags & HIFN_IS_7811) {
2178        if (dmacsr & HIFN_DMACSR_ILLR)
2179            device_printf(sc->sc_dev, "illegal read\n");
2180        if (dmacsr & HIFN_DMACSR_ILLW)
2181            device_printf(sc->sc_dev, "illegal write\n");
2182    }
2183
2184    restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2185        HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2186    if (restart) {
2187        device_printf(sc->sc_dev, "abort, resetting.\n");
2188        hifnstats.hst_abort++;
2189        hifn_abort(sc);
2190        HIFN_UNLOCK(sc);
2191        return IRQ_HANDLED;
2192    }
2193
2194    if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2195        /*
2196         * If no slots to process and we receive a "waiting on
2197         * command" interrupt, we disable the "waiting on command"
2198         * interrupt by clearing its enable bit in the IER.
2199         */
2200        sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2201        WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2202    }
2203
2204    /* clear the rings */
2205    i = dma->resk; u = dma->resu;
2206    while (u != 0) {
2207        HIFN_RESR_SYNC(sc, i,
2208            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2209        if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2210            HIFN_RESR_SYNC(sc, i,
2211                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2212            break;
2213        }
2214
2215        if (i != HIFN_D_RES_RSIZE) {
2216            struct hifn_command *cmd;
2217            u_int8_t *macbuf = NULL;
2218
2219            HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2220            cmd = dma->hifn_commands[i];
2221            KASSERT(cmd != NULL,
2222                ("hifn_intr: null command slot %u", i));
2223            dma->hifn_commands[i] = NULL;
2224
2225            if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2226                macbuf = dma->result_bufs[i];
2227                macbuf += 12;
2228            }
2229
2230            hifn_callback(sc, cmd, macbuf);
2231            hifnstats.hst_opackets++;
2232            u--;
2233        }
2234
2235        if (++i == (HIFN_D_RES_RSIZE + 1))
2236            i = 0;
2237    }
2238    dma->resk = i; dma->resu = u;
2239
2240    i = dma->srck; u = dma->srcu;
2241    while (u != 0) {
2242        if (i == HIFN_D_SRC_RSIZE)
2243            i = 0;
2244        HIFN_SRCR_SYNC(sc, i,
2245            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2246        if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2247            HIFN_SRCR_SYNC(sc, i,
2248                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2249            break;
2250        }
2251        i++, u--;
2252    }
2253    dma->srck = i; dma->srcu = u;
2254
2255    i = dma->cmdk; u = dma->cmdu;
2256    while (u != 0) {
2257        HIFN_CMDR_SYNC(sc, i,
2258            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2259        if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2260            HIFN_CMDR_SYNC(sc, i,
2261                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2262            break;
2263        }
2264        if (i != HIFN_D_CMD_RSIZE) {
2265            u--;
2266            HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2267        }
2268        if (++i == (HIFN_D_CMD_RSIZE + 1))
2269            i = 0;
2270    }
2271    dma->cmdk = i; dma->cmdu = u;
2272
2273    HIFN_UNLOCK(sc);
2274
2275    if (sc->sc_needwakeup) { /* XXX check high watermark */
2276        int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2277#ifdef HIFN_DEBUG
2278        if (hifn_debug)
2279            device_printf(sc->sc_dev,
2280                "wakeup crypto (%x) u %d/%d/%d/%d\n",
2281                sc->sc_needwakeup,
2282                dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2283#endif
2284        sc->sc_needwakeup &= ~wakeup;
2285        crypto_unblock(sc->sc_cid, wakeup);
2286    }
2287
2288    return IRQ_HANDLED;
2289}
2290
2291/*
2292 * Allocate a new 'session' and return an encoded session id. 'sidp'
2293 * contains our registration id, and should contain an encoded session
2294 * id on successful allocation.
2295 */
2296static int
2297hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2298{
2299    struct hifn_softc *sc = device_get_softc(dev);
2300    struct cryptoini *c;
2301    int mac = 0, cry = 0, sesn;
2302    struct hifn_session *ses = NULL;
2303    unsigned long l_flags;
2304
2305    DPRINTF("%s()\n", __FUNCTION__);
2306
2307    KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2308    if (sidp == NULL || cri == NULL || sc == NULL) {
2309        DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
2310        return (EINVAL);
2311    }
2312
2313    HIFN_LOCK(sc);
2314    if (sc->sc_sessions == NULL) {
2315        ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
2316                SLAB_ATOMIC);
2317        if (ses == NULL) {
2318            HIFN_UNLOCK(sc);
2319            return (ENOMEM);
2320        }
2321        sesn = 0;
2322        sc->sc_nsessions = 1;
2323    } else {
2324        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2325            if (!sc->sc_sessions[sesn].hs_used) {
2326                ses = &sc->sc_sessions[sesn];
2327                break;
2328            }
2329        }
2330
2331        if (ses == NULL) {
2332            sesn = sc->sc_nsessions;
2333            ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
2334                    SLAB_ATOMIC);
2335            if (ses == NULL) {
2336                HIFN_UNLOCK(sc);
2337                return (ENOMEM);
2338            }
2339            bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2340            bzero(sc->sc_sessions, sesn * sizeof(*ses));
2341            kfree(sc->sc_sessions);
2342            sc->sc_sessions = ses;
2343            ses = &sc->sc_sessions[sesn];
2344            sc->sc_nsessions++;
2345        }
2346    }
2347    HIFN_UNLOCK(sc);
2348
2349    bzero(ses, sizeof(*ses));
2350    ses->hs_used = 1;
2351
2352    for (c = cri; c != NULL; c = c->cri_next) {
2353        switch (c->cri_alg) {
2354        case CRYPTO_MD5:
2355        case CRYPTO_SHA1:
2356        case CRYPTO_MD5_HMAC:
2357        case CRYPTO_SHA1_HMAC:
2358            if (mac) {
2359                DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2360                return (EINVAL);
2361            }
2362            mac = 1;
2363            ses->hs_mlen = c->cri_mlen;
2364            if (ses->hs_mlen == 0) {
2365                switch (c->cri_alg) {
2366                case CRYPTO_MD5:
2367                case CRYPTO_MD5_HMAC:
2368                    ses->hs_mlen = 16;
2369                    break;
2370                case CRYPTO_SHA1:
2371                case CRYPTO_SHA1_HMAC:
2372                    ses->hs_mlen = 20;
2373                    break;
2374                }
2375            }
2376            break;
2377        case CRYPTO_DES_CBC:
2378        case CRYPTO_3DES_CBC:
2379        case CRYPTO_AES_CBC:
2380        case CRYPTO_ARC4:
2381            if (cry) {
2382                DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2383                return (EINVAL);
2384            }
2385            cry = 1;
2386            break;
2387        default:
2388            DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2389            return (EINVAL);
2390        }
2391    }
2392    if (mac == 0 && cry == 0) {
2393        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2394        return (EINVAL);
2395    }
2396
2397    *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2398
2399    return (0);
2400}
2401
2402/*
2403 * Deallocate a session.
2404 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2405 * XXX to blow away any keys already stored there.
2406 */
2407static int
2408hifn_freesession(device_t dev, u_int64_t tid)
2409{
2410    struct hifn_softc *sc = device_get_softc(dev);
2411    int session, error;
2412    u_int32_t sid = CRYPTO_SESID2LID(tid);
2413    unsigned long l_flags;
2414
2415    DPRINTF("%s()\n", __FUNCTION__);
2416
2417    KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2418    if (sc == NULL) {
2419        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2420        return (EINVAL);
2421    }
2422
2423    HIFN_LOCK(sc);
2424    session = HIFN_SESSION(sid);
2425    if (session < sc->sc_nsessions) {
2426        bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2427        error = 0;
2428    } else {
2429        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2430        error = EINVAL;
2431    }
2432    HIFN_UNLOCK(sc);
2433
2434    return (error);
2435}
2436
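/*
 * OCF entry point for symmetric requests: validate the descriptor
 * chain (one cipher, one hash, or both in an order the engine
 * supports), translate it into a hifn_command (algorithm masks, key,
 * IV) and submit it via hifn_crypto().  ERESTART from the submit path
 * is returned to the core so the request is requeued.
 */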
2437static int
2438hifn_process(device_t dev, struct cryptop *crp, int hint)
2439{
2440    struct hifn_softc *sc = device_get_softc(dev);
2441    struct hifn_command *cmd = NULL;
2442    int session, err, ivlen;
2443    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2444
2445    DPRINTF("%s()\n", __FUNCTION__);
2446
2447    if (crp == NULL || crp->crp_callback == NULL) {
2448        hifnstats.hst_invalid++;
2449        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2450        return (EINVAL);
2451    }
2452    session = HIFN_SESSION(crp->crp_sid);
2453
2454    if (sc == NULL || session >= sc->sc_nsessions) {
2455        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2456        err = EINVAL;
2457        goto errout;
2458    }
2459
2460    cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
2461    if (cmd == NULL) {
2462        hifnstats.hst_nomem++;
2463        err = ENOMEM;
2464        goto errout;
2465    }
2466    memset(cmd, 0, sizeof(*cmd));
2467
2468    if (crp->crp_flags & CRYPTO_F_SKBUF) {
2469        cmd->src_skb = (struct sk_buff *)crp->crp_buf;
2470        cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
2471    } else if (crp->crp_flags & CRYPTO_F_IOV) {
2472        cmd->src_io = (struct uio *)crp->crp_buf;
2473        cmd->dst_io = (struct uio *)crp->crp_buf;
2474    } else {
2475        cmd->src_buf = crp->crp_buf;
2476        cmd->dst_buf = crp->crp_buf;
2477    }
2478
2479    crd1 = crp->crp_desc;
2480    if (crd1 == NULL) {
2481        DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2482        err = EINVAL;
2483        goto errout;
2484    }
2485    crd2 = crd1->crd_next;
2486
2487    if (crd2 == NULL) {
2488        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2489            crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2490            crd1->crd_alg == CRYPTO_SHA1 ||
2491            crd1->crd_alg == CRYPTO_MD5) {
2492            maccrd = crd1;
2493            enccrd = NULL;
2494        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2495            crd1->crd_alg == CRYPTO_3DES_CBC ||
2496            crd1->crd_alg == CRYPTO_AES_CBC ||
2497            crd1->crd_alg == CRYPTO_ARC4) {
2498            if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2499                cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2500            maccrd = NULL;
2501            enccrd = crd1;
2502        } else {
2503            DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2504            err = EINVAL;
2505            goto errout;
2506        }
2507    } else {
2508        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2509                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2510                     crd1->crd_alg == CRYPTO_MD5 ||
2511                     crd1->crd_alg == CRYPTO_SHA1) &&
2512            (crd2->crd_alg == CRYPTO_DES_CBC ||
2513             crd2->crd_alg == CRYPTO_3DES_CBC ||
2514             crd2->crd_alg == CRYPTO_AES_CBC ||
2515             crd2->crd_alg == CRYPTO_ARC4) &&
2516            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2517            cmd->base_masks = HIFN_BASE_CMD_DECODE;
2518            maccrd = crd1;
2519            enccrd = crd2;
2520        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2521             crd1->crd_alg == CRYPTO_ARC4 ||
2522             crd1->crd_alg == CRYPTO_3DES_CBC ||
2523             crd1->crd_alg == CRYPTO_AES_CBC) &&
2524            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2525                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2526                     crd2->crd_alg == CRYPTO_MD5 ||
2527                     crd2->crd_alg == CRYPTO_SHA1) &&
2528            (crd1->crd_flags & CRD_F_ENCRYPT)) {
2529            enccrd = crd1;
2530            maccrd = crd2;
2531        } else {
2532            /*
2533             * We cannot order the 7751 as requested
2534             */
2535            DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
2536            err = EINVAL;
2537            goto errout;
2538        }
2539    }
2540
2541    if (enccrd) {
2542        cmd->enccrd = enccrd;
2543        cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2544        switch (enccrd->crd_alg) {
2545        case CRYPTO_ARC4:
2546            cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2547            break;
2548        case CRYPTO_DES_CBC:
2549            cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2550                HIFN_CRYPT_CMD_MODE_CBC |
2551                HIFN_CRYPT_CMD_NEW_IV;
2552            break;
2553        case CRYPTO_3DES_CBC:
2554            cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2555                HIFN_CRYPT_CMD_MODE_CBC |
2556                HIFN_CRYPT_CMD_NEW_IV;
2557            break;
2558        case CRYPTO_AES_CBC:
2559            cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2560                HIFN_CRYPT_CMD_MODE_CBC |
2561                HIFN_CRYPT_CMD_NEW_IV;
2562            break;
2563        default:
2564            DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2565            err = EINVAL;
2566            goto errout;
2567        }
2568        if (enccrd->crd_alg != CRYPTO_ARC4) {
2569            ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2570                HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2571            if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2572                if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2573                    bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2574                else
2575                    read_random(cmd->iv, ivlen);
2576
2577                if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2578                    == 0) {
2579                    crypto_copyback(crp->crp_flags,
2580                        crp->crp_buf, enccrd->crd_inject,
2581                        ivlen, cmd->iv);
2582                }
2583            } else {
2584                if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2585                    bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2586                else {
2587                    crypto_copydata(crp->crp_flags,
2588                        crp->crp_buf, enccrd->crd_inject,
2589                        ivlen, cmd->iv);
2590                }
2591            }
2592        }
2593
2594        if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2595            cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2596        cmd->ck = enccrd->crd_key;
2597        cmd->cklen = enccrd->crd_klen >> 3;
2598        cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2599
2600        /*
2601         * Need to specify the size for the AES key in the masks.
2602         */
2603        if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2604            HIFN_CRYPT_CMD_ALG_AES) {
2605            switch (cmd->cklen) {
2606            case 16:
2607                cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2608                break;
2609            case 24:
2610                cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2611                break;
2612            case 32:
2613                cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2614                break;
2615            default:
2616                DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2617                err = EINVAL;
2618                goto errout;
2619            }
2620        }
2621    }
2622
2623    if (maccrd) {
2624        cmd->maccrd = maccrd;
2625        cmd->base_masks |= HIFN_BASE_CMD_MAC;
2626
2627        switch (maccrd->crd_alg) {
2628        case CRYPTO_MD5:
2629            cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2630                HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2631                HIFN_MAC_CMD_POS_IPSEC;
2632            break;
2633        case CRYPTO_MD5_HMAC:
2634            cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2635                HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2636                HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2637            break;
2638        case CRYPTO_SHA1:
2639            cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2640                HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2641                HIFN_MAC_CMD_POS_IPSEC;
2642            break;
2643        case CRYPTO_SHA1_HMAC:
2644            cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2645                HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2646                HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2647            break;
2648        }
2649
2650        if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2651             maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2652            cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2653            bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2654            bzero(cmd->mac + (maccrd->crd_klen >> 3),
2655                HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2656        }
2657    }
2658
2659    cmd->crp = crp;
2660    cmd->session_num = session;
2661    cmd->softc = sc;
2662
2663    err = hifn_crypto(sc, cmd, crp, hint);
2664    if (!err) {
2665        return 0;
2666    } else if (err == ERESTART) {
2667        /*
2668         * There weren't enough resources to dispatch the request
2669         * to the part. Notify the caller so they'll requeue this
2670         * request and resubmit it again soon.
2671         */
2672#ifdef HIFN_DEBUG
2673        if (hifn_debug)
2674            device_printf(sc->sc_dev, "requeue request\n");
2675#endif
2676        kfree(cmd);
2677        sc->sc_needwakeup |= CRYPTO_SYMQ;
2678        return (err);
2679    }
2680
2681errout:
2682    if (cmd != NULL)
2683        kfree(cmd);
2684    if (err == EINVAL)
2685        hifnstats.hst_invalid++;
2686    else
2687        hifnstats.hst_nomem++;
2688    crp->crp_etype = err;
2689    crypto_done(crp);
2690    return (err);
2691}
2692
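/*
 * Recover from a DMA abort: walk the outstanding result slots,
 * complete any operation the chip already finished, fail the rest
 * with ENOMEM, then reset and reinitialize the board.
 */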
2693static void
2694hifn_abort(struct hifn_softc *sc)
2695{
2696    struct hifn_dma *dma = sc->sc_dma;
2697    struct hifn_command *cmd;
2698    struct cryptop *crp;
2699    int i, u;
2700
2701    DPRINTF("%s()\n", __FUNCTION__);
2702
2703    i = dma->resk; u = dma->resu;
2704    while (u != 0) {
2705        cmd = dma->hifn_commands[i];
2706        KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2707        dma->hifn_commands[i] = NULL;
2708        crp = cmd->crp;
2709
2710        if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2711            /* Salvage what we can. */
2712            u_int8_t *macbuf;
2713
2714            if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2715                macbuf = dma->result_bufs[i];
2716                macbuf += 12;
2717            } else
2718                macbuf = NULL;
2719            hifnstats.hst_opackets++;
2720            hifn_callback(sc, cmd, macbuf);
2721        } else {
2722#if 0
2723            if (cmd->src_map == cmd->dst_map) {
2724                bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2725                    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2726            } else {
2727                bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2728                    BUS_DMASYNC_POSTWRITE);
2729                bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2730                    BUS_DMASYNC_POSTREAD);
2731            }
2732#endif
2733
2734            if (cmd->src_skb != cmd->dst_skb) {
2735#ifdef NOTYET
2736                m_freem(cmd->src_m);
2737                crp->crp_buf = (caddr_t)cmd->dst_m;
2738#else
2739                device_printf(sc->sc_dev,
2740                        "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2741                        __FILE__, __LINE__);
2742#endif
2743            }
2744
2745            /* non-shared buffers cannot be restarted */
2746            if (cmd->src_map != cmd->dst_map) {
2747                /*
2748                 * XXX should be EAGAIN, delayed until
2749                 * after the reset.
2750                 */
2751                crp->crp_etype = ENOMEM;
2752                pci_unmap_buf(sc, &cmd->dst);
2753            } else
2754                crp->crp_etype = ENOMEM;
2755
2756            pci_unmap_buf(sc, &cmd->src);
2757
2758            kfree(cmd);
2759            if (crp->crp_etype != EAGAIN)
2760                crypto_done(crp);
2761        }
2762
2763        if (++i == HIFN_D_RES_RSIZE)
2764            i = 0;
2765        u--;
2766    }
2767    dma->resk = i; dma->resu = u;
2768
2769    hifn_reset_board(sc, 1);
2770    hifn_init_dma(sc);
2771    hifn_init_pci_registers(sc);
2772}
2773
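/*
 * Per-command completion: copy any slop bytes back into the caller's
 * buffer, retire destination descriptors, copy the MAC result to the
 * offset named by the hash descriptor, then unmap the buffers and
 * hand the request back to the crypto core.
 */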
2774static void
2775hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2776{
2777    struct hifn_dma *dma = sc->sc_dma;
2778    struct cryptop *crp = cmd->crp;
2779    struct cryptodesc *crd;
2780    int i, u;
2781
2782    DPRINTF("%s()\n", __FUNCTION__);
2783
2784#if 0
2785    if (cmd->src_map == cmd->dst_map) {
2786        bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2787            BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2788    } else {
2789        bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2790            BUS_DMASYNC_POSTWRITE);
2791        bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2792            BUS_DMASYNC_POSTREAD);
2793    }
2794#endif
2795
2796    if (crp->crp_flags & CRYPTO_F_SKBUF) {
2797        if (cmd->src_skb != cmd->dst_skb) {
2798#ifdef NOTYET
2799            crp->crp_buf = (caddr_t)cmd->dst_m;
2800            totlen = cmd->src_mapsize;
2801            for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2802                if (totlen < m->m_len) {
2803                    m->m_len = totlen;
2804                    totlen = 0;
2805                } else
2806                    totlen -= m->m_len;
2807            }
2808            cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
2809            m_freem(cmd->src_m);
2810#else
2811            device_printf(sc->sc_dev,
2812                    "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2813                    __FILE__, __LINE__);
2814#endif
2815        }
2816    }
2817
2818    if (cmd->sloplen != 0) {
2819        crypto_copyback(crp->crp_flags, crp->crp_buf,
2820            cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
2821            (caddr_t)&dma->slop[cmd->slopidx]);
2822    }
2823
2824    i = dma->dstk; u = dma->dstu;
2825    while (u != 0) {
2826        if (i == HIFN_D_DST_RSIZE)
2827            i = 0;
2828#if 0
2829        bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2830            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2831#endif
2832        if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2833#if 0
2834            bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2835                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2836#endif
2837            break;
2838        }
2839        i++, u--;
2840    }
2841    dma->dstk = i; dma->dstu = u;
2842
2843    hifnstats.hst_obytes += cmd->dst_mapsize;
2844
2845    if (macbuf != NULL) {
2846        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2847            int len;
2848
2849            if (crd->crd_alg != CRYPTO_MD5 &&
2850                crd->crd_alg != CRYPTO_SHA1 &&
2851                crd->crd_alg != CRYPTO_MD5_HMAC &&
2852                crd->crd_alg != CRYPTO_SHA1_HMAC) {
2853                continue;
2854            }
2855            len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
2856            crypto_copyback(crp->crp_flags, crp->crp_buf,
2857                crd->crd_inject, len, macbuf);
2858            break;
2859        }
2860    }
2861
2862    if (cmd->src_map != cmd->dst_map)
2863        pci_unmap_buf(sc, &cmd->dst);
2864    pci_unmap_buf(sc, &cmd->src);
2865    kfree(cmd);
2866    crypto_done(crp);
2867}
2868
2869/*
2870 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2871 * and Group 1 registers; avoid conditions that could create
2872 * burst writes by doing a read in between the writes.
2873 *
2874 * NB: The read we interpose is always to the same register;
2875 * we do this because reading from an arbitrary (e.g. last)
2876 * register may not always work.
2877 */
2878static void
2879hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2880{
2881    if (sc->sc_flags & HIFN_IS_7811) {
2882        if (sc->sc_bar0_lastreg == reg - 4)
2883            readl(sc->sc_bar0 + HIFN_0_PUCNFG);
2884        sc->sc_bar0_lastreg = reg;
2885    }
2886    writel(val, sc->sc_bar0 + reg);
2887}
2888
2889static void
2890hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2891{
2892    if (sc->sc_flags & HIFN_IS_7811) {
2893        if (sc->sc_bar1_lastreg == reg - 4)
2894            readl(sc->sc_bar1 + HIFN_1_REVID);
2895        sc->sc_bar1_lastreg = reg;
2896    }
2897    writel(val, sc->sc_bar1 + reg);
2898}
2899
2900
2901static struct pci_device_id hifn_pci_tbl[] = {
2902    { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
2903      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2904    { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
2905      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2906    { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
2907      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2908    { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
2909      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2910    { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
2911      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2912    { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
2913      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2914    /*
2915     * Other vendors share this PCI ID as well, such as
2916     * http://www.powercrypt.com, and obviously they also
2917     * use the same key.
2918     */
2919    { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
2920      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2921    { 0, 0, 0, 0, 0, 0, }
2922};
2923MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
2924
2925static struct pci_driver hifn_driver = {
2926    .name = "hifn",
2927    .id_table = hifn_pci_tbl,
2928    .probe = hifn_probe,
2929    .remove = hifn_remove,
2930    /* add PM stuff here one day */
2931};
2932
2933static int __init hifn_init (void)
2934{
2935    struct hifn_softc *sc = NULL;
2936    int rc;
2937
2938    DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
2939
2940    rc = pci_register_driver(&hifn_driver);
2941    pci_register_driver_compat(&hifn_driver, rc);
2942
2943    return rc;
2944}
2945
2946static void __exit hifn_exit (void)
2947{
2948    pci_unregister_driver(&hifn_driver);
2949}
2950
2951module_init(hifn_init);
2952module_exit(hifn_exit);
2953
2954MODULE_LICENSE("BSD");
2955MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
2956MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
2957
