Writing Device Drivers

Allocation of DMA Resources

The tran_init_pkt(9E) entry point must allocate DMA resources for a data transfer if the following conditions are true: the bp argument is not null, bp->b_bcount is not zero, and DMA resources have not yet been allocated for this scsi_pkt(9S).

The HBA driver needs to track whether DMA resources have been allocated for a particular command. This tracking can be done with a flag bit or a DMA handle in the per-packet HBA driver private data.
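For example, the per-packet private data might look like the following sketch. The struct xx_cmd name and its layout are illustrative only; Example 18–3 below applies the same idea with its struct isp_cmd and CFLAG_DMAVALID flag.

/*
 * Hypothetical per-packet private data.  Either a flag bit
 * (CFLAG_DMAVALID) or the DMA handle itself (cmd_dmahandle is
 * NULL until allocated) records whether DMA resources are
 * currently bound for this command.
 */
struct xx_cmd {
    struct scsi_pkt     *cmd_pkt;        /* back pointer to the pkt */
    ddi_dma_handle_t    cmd_dmahandle;   /* DMA handle, if allocated */
    uint_t              cmd_flags;       /* CFLAG_* state bits */
};

#define CFLAG_DMAVALID  0x0001           /* DMA resources are bound */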

The PKT_DMA_PARTIAL flag in the pkt enables the target driver to break up a data transfer into multiple SCSI commands in order to complete the request. This approach is useful when the HBA hardware's scatter-gather capabilities or the available system DMA resources cannot handle the complete request in a single SCSI command.

The PKT_DMA_PARTIAL flag enables the HBA driver to set the DDI_DMA_PARTIAL flag when the DMA resources for this SCSI command are allocated, for example, with ddi_dma_buf_bind_handle(9F). The DMA attributes used when allocating the resources should accurately describe any constraints on the ability of the HBA hardware to perform DMA. If the system can allocate DMA resources for only part of the request, ddi_dma_buf_bind_handle(9F) returns DDI_DMA_PARTIAL_MAP.

The tran_init_pkt(9E) entry point must return the amount of the transfer for which DMA resources could not be allocated in the pkt_resid field of the packet.
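From the target driver's side, pkt_resid is what drives the breakup of the transfer into multiple commands. The following fragment is a simplified, hypothetical sketch: xx_partial_xfer() and my_transport_and_wait() are invented names, and the error handling is minimal. It relies on the scsi_init_pkt(9F) convention that passing the existing pkt and bp back in sets up the next portion of the transfer.

/*
 * Hedged sketch of a target driver issuing a transfer with
 * PKT_DMA_PARTIAL.  my_transport_and_wait() is a hypothetical
 * helper that transports the command and waits for completion.
 */
static int
xx_partial_xfer(struct scsi_address *ap, struct buf *bp)
{
    struct scsi_pkt    *pkt;
    size_t             resid;

    /*
     * First call: allocate the pkt and bind as much of bp as the
     * HBA can map.  pkt_resid reports the unmapped remainder.
     */
    pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
        sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
        SLEEP_FUNC, NULL);
    if (pkt == NULL)
        return (ENOMEM);

    for (;;) {
        resid = pkt->pkt_resid;  /* bytes not covered by this command */
        /* ... build the CDB for the mapped portion and send it ... */
        if (my_transport_and_wait(pkt) != 0)    /* hypothetical */
            break;
        if (resid == 0)
            break;               /* the whole request has been issued */
        /*
         * Reuse the same pkt and bp; the HBA's tran_init_pkt(9E)
         * moves the DMA resources to the next portion of the
         * transfer and updates pkt_resid again.
         */
        if (scsi_init_pkt(ap, pkt, bp, CDB_GROUP1,
            sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
            SLEEP_FUNC, NULL) == NULL)
            break;
    }
    scsi_destroy_pkt(pkt);
    return (0);
}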

A target driver can make one request to tran_init_pkt(9E) to allocate both a scsi_pkt(9S) structure and DMA resources for that pkt at the same time. In this case, if the HBA driver is unable to allocate DMA resources, the HBA driver must free the allocated scsi_pkt(9S), by calling scsi_hba_pkt_free(9F), before returning.

Alternatively, the target driver might allocate the scsi_pkt(9S) first and request DMA resources for this pkt in a later call. In this case, if the HBA driver is unable to allocate DMA resources, the HBA driver must not free the pkt. The target driver is responsible for freeing the pkt in this case.
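The following is a minimal sketch of how these two rules might look inside a tran_init_pkt(9E) implementation. The xx_ names, struct xx_cmd, and xx_dma_alloc() are hypothetical placeholders; xx_dma_alloc() stands in for a routine such as isp_i_dma_alloc() in Example 18–3.

/*
 * Hedged sketch of the tran_init_pkt(9E) cleanup contract.
 */
static struct scsi_pkt *
xx_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(caddr_t), caddr_t arg)
{
    struct xx_softc    *xx = ap->a_hba_tran->tran_hba_private;
    int                new_pkt = 0;

    if (pkt == NULL) {
        /* The target driver asked for a pkt and DMA resources together. */
        pkt = scsi_hba_pkt_alloc(xx->xx_dip, ap, cmdlen, statuslen,
            tgtlen, sizeof (struct xx_cmd), callback, arg);
        if (pkt == NULL)
            return (NULL);
        new_pkt = 1;
    }

    if (bp != NULL && bp->b_bcount != 0 &&
        xx_dma_alloc(xx, pkt, bp, flags, callback) == 0) {
        /*
         * DMA allocation failed.  Free the pkt only if this call
         * allocated it; a pkt passed in by the target driver is the
         * target driver's responsibility to free.
         */
        if (new_pkt)
            scsi_hba_pkt_free(ap, pkt);
        return (NULL);
    }
    return (pkt);
}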


Example 18–3 HBA Driver Allocation of DMA Resources

static int
isp_i_dma_alloc(
    struct isp         *isp,
    struct scsi_pkt    *pkt,
    struct buf         *bp,
    int                flags,
    int                (*callback)())
{
    struct isp_cmd     *sp  = (struct isp_cmd *)pkt->pkt_ha_private;
    int                dma_flags;
    ddi_dma_attr_t     tmp_dma_attr;
    int                (*cb)(caddr_t);
    int                i;

    ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

    /* Derive the DMA transfer direction from the buf(9S) flags. */
    if (bp->b_flags & B_READ) {
        sp->cmd_flags &= ~CFLAG_DMASEND;
        dma_flags = DDI_DMA_READ;
    } else {
        sp->cmd_flags |= CFLAG_DMASEND;
        dma_flags = DDI_DMA_WRITE;
    }
    /* Translate the pkt flags into the corresponding DDI DMA flags. */
    if (flags & PKT_CONSISTENT) {
        sp->cmd_flags |= CFLAG_CMDIOPB;
        dma_flags |= DDI_DMA_CONSISTENT;
    }
    if (flags & PKT_DMA_PARTIAL) {
        dma_flags |= DDI_DMA_PARTIAL;
    }

    /* Copy the device DMA attributes and apply the instance burst sizes. */
    tmp_dma_attr = isp_dma_attr;
    tmp_dma_attr.dma_attr_burstsizes = isp->isp_burst_size;

    /* Map the SCSA callback convention onto the DDI DMA convention. */
    cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

    /* Allocate a DMA handle that reflects these attributes. */
    if ((i = ddi_dma_alloc_handle(isp->isp_dip, &tmp_dma_attr,
      cb, 0, &sp->cmd_dmahandle)) != DDI_SUCCESS) {
        /*
         * ddi_dma_alloc_handle(9F) fails with either
         * DDI_DMA_BADATTR or DDI_DMA_NORESOURCES.
         */
        switch (i) {
          case DDI_DMA_BADATTR:
              bioerror(bp, EFAULT);
              return (0);
          case DDI_DMA_NORESOURCES:
              bioerror(bp, 0);
              return (0);
        }
    }

    /* Bind the buffer to the handle, allocating the DMA resources. */
    i = ddi_dma_buf_bind_handle(sp->cmd_dmahandle, bp, dma_flags,
      cb, 0, &sp->cmd_dmacookies[0], &sp->cmd_ncookies);

    switch (i) {
      case DDI_DMA_PARTIAL_MAP:
          /*
           * Only part of the request could be mapped.  Find out how
           * many DMA windows there are, and activate the first one.
           */
          if (ddi_dma_numwin(sp->cmd_dmahandle, &sp->cmd_nwin) ==
            DDI_FAILURE) {
              cmn_err(CE_PANIC, "ddi_dma_numwin() failed\n");
          }

          if (ddi_dma_getwin(sp->cmd_dmahandle, sp->cmd_curwin,
            &sp->cmd_dma_offset, &sp->cmd_dma_len, &sp->cmd_dmacookies[0],
            &sp->cmd_ncookies) == DDI_FAILURE) {
              cmn_err(CE_PANIC, "ddi_dma_getwin() failed\n");
          }
          goto get_dma_cookies;

      case DDI_DMA_MAPPED:
          /* The entire request was mapped in a single window. */
          sp->cmd_nwin = 1;
          sp->cmd_dma_len = 0;
          sp->cmd_dma_offset = 0;

      get_dma_cookies:
          /*
           * Gather the cookies for this window, up to the number of
           * scatter-gather segments the HBA hardware supports.  The
           * first cookie was returned by the bind or getwin call above.
           */
          i = 0;
          sp->cmd_dmacount = 0;
          for (;;) {
              sp->cmd_dmacount += sp->cmd_dmacookies[i++].dmac_size;
              if (i == ISP_NDATASEGS || i == sp->cmd_ncookies)
                  break;
              ddi_dma_nextcookie(sp->cmd_dmahandle,
                &sp->cmd_dmacookies[i]);
          }
          sp->cmd_cookie = i;
          sp->cmd_cookiecnt = i;
          sp->cmd_flags |= CFLAG_DMAVALID;
          /* Report the part of the request this command does not cover. */
          pkt->pkt_resid = bp->b_bcount - sp->cmd_dmacount;
          return (1);

      case DDI_DMA_NORESOURCES:
          bioerror(bp, 0);
          break;

      case DDI_DMA_NOMAPPING:
          bioerror(bp, EFAULT);
          break;

      case DDI_DMA_TOOBIG:
          bioerror(bp, EINVAL);
          break;

      case DDI_DMA_INUSE:
          cmn_err(CE_PANIC, "ddi_dma_buf_bind_handle:"
            " DDI_DMA_INUSE impossible\n");
          /* NOTREACHED */

      default:
          cmn_err(CE_PANIC, "ddi_dma_buf_bind_handle:"
            " 0x%x impossible\n", i);
          /* NOTREACHED */
    }
    /* The bind failed: release the handle allocated above. */
    ddi_dma_free_handle(&sp->cmd_dmahandle);
    sp->cmd_dmahandle = NULL;
    sp->cmd_flags &= ~CFLAG_DMAVALID;
    return (0);
}