Writing Device Drivers

Allocation of DMA Resources

If bp is not NULL and bp->b_bcount is not zero and DMA resources have not yet been allocated for this scsi_pkt(9S), the tran_init_pkt(9E) entry point must allocate DMA resources for a data transfer. The HBA driver needs to keep track of whether DMA resources have been allocated for a particular command with a flag bit or a DMA handle in the per-packet HBA driver private data.

By setting the PKT_DMA_PARTIAL flag in the pkt, the target driver indicates that it can accept having the data transfer broken up into multiple SCSI commands to accommodate the complete request. This might be necessary if the HBA hardware's scatter-gather capabilities or the system's DMA resources are insufficient to accommodate the complete request in a single SCSI command.

If the PKT_DMA_PARTIAL flag is set, the HBA driver can set the DDI_DMA_PARTIAL flag when allocating DMA resources (using, for example, ddi_dma_buf_bind_handle(9F)) for this SCSI command. The DMA attributes used when allocating the DMA resources should accurately describe any constraints placed on the ability of the HBA hardware to perform DMA. If the system can only allocate DMA resources for part of the request, ddi_dma_buf_bind_handle(9F) will return DDI_DMA_PARTIAL_MAP.

The tran_init_pkt(9E) entry point must return the amount of DMA resources not allocated for this transfer in the field pkt_resid.

A target driver can make one request to tran_init_pkt(9E) to simultaneously allocate both a scsi_pkt(9S) structure and DMA resources for that pkt. In this case, if the HBA driver is unable to allocate DMA resources, it must free the allocated scsi_pkt(9S) before returning. The scsi_pkt(9S) must be freed by calling scsi_hba_pkt_free(9F).

The target driver might first allocate the scsi_pkt(9S) and allocate DMA resources for this pkt at a later time. In this case, if the HBA driver is unable to allocate DMA resources, it must not free pkt. The target driver in this case is responsible for freeing the pkt.


Example 15-4 HBA Driver Allocation of DMA Resources

/*
 * isp_i_dma_alloc()
 *
 * Allocate DMA resources for the transfer described by bp and attach
 * them to pkt's per-command private data (struct isp_cmd).
 *
 *	isp	 - per-instance soft state (supplies dip and burst sizes)
 *	pkt	 - the scsi_pkt(9S) being prepared
 *	bp	 - buf(9S) describing the data transfer; B_READ in
 *		   b_flags selects the DMA direction
 *	flags	 - tran_init_pkt(9E) flags (PKT_CONSISTENT,
 *		   PKT_DMA_PARTIAL)
 *	callback - NULL_FUNC or SLEEP_FUNC, mapped to the DDI
 *		   DONTWAIT/SLEEP DMA callback
 *
 * Returns 1 on success with CFLAG_DMAVALID set and pkt->pkt_resid set
 * to the byte count NOT covered by the cookies gathered so far.
 * Returns 0 on failure with bioerror() recorded on bp and no DMA
 * handle left allocated.
 */
static int
isp_i_dma_alloc(
    struct isp         *isp,
    struct scsi_pkt    *pkt,
    struct buf         *bp,
    int                flags,
    int                (*callback)())
{
    struct isp_cmd    *sp  = (struct isp_cmd *)pkt->pkt_ha_private;
    int                dma_flags;
    ddi_dma_attr_t     tmp_dma_attr;
    int                (*cb)(caddr_t);
    int                i;

    ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

    /* Transfer direction determines the DMA flags. */
    if (bp->b_flags & B_READ) {
        sp->cmd_flags &= ~CFLAG_DMASEND;
        dma_flags = DDI_DMA_READ;
    } else {
        sp->cmd_flags |= CFLAG_DMASEND;
        dma_flags = DDI_DMA_WRITE;
    }
    if (flags & PKT_CONSISTENT) {
        sp->cmd_flags |= CFLAG_CMDIOPB;
        dma_flags |= DDI_DMA_CONSISTENT;
    }
    if (flags & PKT_DMA_PARTIAL) {
        dma_flags |= DDI_DMA_PARTIAL;
    }

    /*
     * Start from the template attributes and restrict the burst
     * sizes to what this instance's hardware supports.
     */
    tmp_dma_attr = isp_dma_attr;
    tmp_dma_attr.dma_attr_burstsizes = isp->isp_burst_size;

    cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

    if ((i = ddi_dma_alloc_handle(isp->isp_dip, &tmp_dma_attr,
          cb, 0, &sp->cmd_dmahandle)) != DDI_SUCCESS) {

        switch (i) {
        case DDI_DMA_BADATTR:
            bioerror(bp, EFAULT);
            return (0);

        case DDI_DMA_NORESOURCES:
            bioerror(bp, 0);
            return (0);

        default:
            /*
             * Any other failure: without this default the code
             * would fall through and attempt the bind below with
             * a handle that was never allocated.
             */
            bioerror(bp, EFAULT);
            return (0);
        }
    }

    i = ddi_dma_buf_bind_handle(sp->cmd_dmahandle, bp, dma_flags,
        cb, 0, &sp->cmd_dmacookies[0], &sp->cmd_ncookies);

    switch (i) {
    case DDI_DMA_PARTIAL_MAP:
        /*
         * Only part of the request could be mapped; set up the
         * first DMA window.  A failure here indicates an internal
         * inconsistency, hence the panic.
         */
        if (ddi_dma_numwin(sp->cmd_dmahandle, &sp->cmd_nwin) ==
                    DDI_FAILURE) {
            cmn_err(CE_PANIC, "ddi_dma_numwin() failed\n");
        }

        if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_curwin,
            &sp->cmd_dma_offset, &sp->cmd_dma_len,
            &sp->cmd_dmacookies[0], &sp->cmd_ncookies) ==
                   DDI_FAILURE) {
            cmn_err(CE_PANIC, "ddi_dma_getwin() failed\n");
        }
        goto get_dma_cookies;

    case DDI_DMA_MAPPED:
        /* The whole request fits in a single window. */
        sp->cmd_nwin = 1;
        sp->cmd_dma_len = 0;
        sp->cmd_dma_offset = 0;

get_dma_cookies:
        /*
         * Gather cookies up to the hardware scatter-gather limit
         * (ISP_NDATASEGS) or until the window is exhausted,
         * accumulating the byte count actually covered.
         */
        i = 0;
        sp->cmd_dmacount = 0;
        for (;;) {
            sp->cmd_dmacount += sp->cmd_dmacookies[i++].dmac_size;

            if (i == ISP_NDATASEGS || i == sp->cmd_ncookies)
                break;
            ddi_dma_nextcookie(sp->cmd_dmahandle,
                &sp->cmd_dmacookies[i]);
        }
        sp->cmd_cookie = i;
        sp->cmd_cookiecnt = i;

        sp->cmd_flags |= CFLAG_DMAVALID;
        /* pkt_resid: bytes of the request NOT covered by cookies. */
        pkt->pkt_resid = bp->b_bcount - sp->cmd_dmacount;
        return (1);

    case DDI_DMA_NORESOURCES:
        bioerror(bp, 0);
        break;

    case DDI_DMA_NOMAPPING:
        bioerror(bp, EFAULT);
        break;

    case DDI_DMA_TOOBIG:
        bioerror(bp, EINVAL);
        break;

    case DDI_DMA_INUSE:
        cmn_err(CE_PANIC, "ddi_dma_buf_bind_handle:"
            " DDI_DMA_INUSE impossible\n");
        /* NOTREACHED */

    default:
        cmn_err(CE_PANIC, "ddi_dma_buf_bind_handle:"
            " 0x%x impossible\n", i);
        /* NOTREACHED */
    }

    /* Bind failed: release the handle so nothing leaks. */
    ddi_dma_free_handle(&sp->cmd_dmahandle);
    sp->cmd_dmahandle = NULL;
    sp->cmd_flags &= ~CFLAG_DMAVALID;
    return (0);
}