/*
* Copyright 2023 Google LLC
* Copyright lowRISC contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS FILE HAS BEEN GENERATED, DO NOT EDIT MANUALLY. COMMAND:
// util/make_new_dif.py --mode=regen --only=autogen

/*
 * NOTE: DMA_INTR_COMMON_<irq>_BIT was manually replaced with the IP's
 * STATE register bit index.
 */

#include "sw/device/lib/dif/autogen/dif_dma_autogen.h"

#include <stdint.h>

#include "dma_regs.h"  // Generated.

OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_init(mmio_region_t base_addr, dif_dma_t *dma) {
if (dma == NULL) {
return kDifBadArg;
}
dma->base_addr = base_addr;
return kDifOk;
}
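// Usage sketch (not part of the generated code): constructing a DIF handle
// with the function above. `kDmaBaseAddr` is a hypothetical placeholder for
// the chip-specific DMA base address and is not defined in this file;
// `mmio_region_from_addr()` comes from the base MMIO library.
//
//   dif_dma_t dma;
//   if (dif_dma_init(mmio_region_from_addr(kDmaBaseAddr), &dma) != kDifOk) {
//     // Handle initialization failure.
//   }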
/**
* Get the corresponding interrupt register bit offset of the IRQ. If the IP's
* HJSON does NOT have a field "no_auto_intr_regs = true", then the
* "<ip>_INTR_COMMON_<irq>_BIT" macro can be used. Otherwise, special cases
* will exist, as templated below.
*/
static bool dma_get_irq_bit_index(dif_dma_irq_t irq,
bitfield_bit32_index_t *index_out) {
switch (irq) {
case kDifDmaIrqWriterDone:
*index_out = DMA_INTR_STATE_WTR_INTR_BIT;
break;
case kDifDmaIrqReaderDone:
*index_out = DMA_INTR_STATE_RDR_INTR_BIT;
break;
default:
return false;
}
return true;
}

OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_get_state(const dif_dma_t *dma,
dif_dma_irq_state_snapshot_t *snapshot) {
if (dma == NULL || snapshot == NULL) {
return kDifBadArg;
}
*snapshot = mmio_region_read32(dma->base_addr, DMA_INTR_STATE_REG_OFFSET);
return kDifOk;
}
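// Usage sketch (not part of the generated code): the snapshot returned above
// is the raw INTR_STATE register value, so individual interrupts can be
// tested with the bitfield helpers and the DMA_INTR_STATE_*_BIT indices used
// in this file. `dma` is assumed to be a handle set up by dif_dma_init().
//
//   dif_dma_irq_state_snapshot_t snapshot;
//   if (dif_dma_irq_get_state(&dma, &snapshot) == kDifOk &&
//       bitfield_bit32_read(snapshot, DMA_INTR_STATE_RDR_INTR_BIT)) {
//     // The "reader done" interrupt is pending.
//   }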
OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_is_pending(const dif_dma_t *dma, dif_dma_irq_t irq,
bool *is_pending) {
if (dma == NULL || is_pending == NULL) {
return kDifBadArg;
}
bitfield_bit32_index_t index;
if (!dma_get_irq_bit_index(irq, &index)) {
return kDifBadArg;
}
uint32_t intr_state_reg =
mmio_region_read32(dma->base_addr, DMA_INTR_STATE_REG_OFFSET);
*is_pending = bitfield_bit32_read(intr_state_reg, index);
return kDifOk;
}

OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_acknowledge_all(const dif_dma_t *dma) {
if (dma == NULL) {
return kDifBadArg;
}
// Writing to the register clears the corresponding bits (Write-one clear).
mmio_region_write32(dma->base_addr, DMA_INTR_STATE_REG_OFFSET, UINT32_MAX);
return kDifOk;
}
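// Usage sketch (not part of the generated code): one common pattern is to
// acknowledge all interrupts once before enabling them, so that stale
// INTR_STATE bits left over from earlier activity do not fire immediately.
// `dma` is assumed to be an initialized handle.
//
//   (void)dif_dma_irq_acknowledge_all(&dma);
//   (void)dif_dma_irq_set_enabled(&dma, kDifDmaIrqReaderDone,
//                                 kDifToggleEnabled);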
OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_acknowledge(const dif_dma_t *dma, dif_dma_irq_t irq) {
if (dma == NULL) {
return kDifBadArg;
}
bitfield_bit32_index_t index;
if (!dma_get_irq_bit_index(irq, &index)) {
return kDifBadArg;
}
// Writing to the register clears the corresponding bits (Write-one clear).
uint32_t intr_state_reg = bitfield_bit32_write(0, index, true);
mmio_region_write32(dma->base_addr, DMA_INTR_STATE_REG_OFFSET,
intr_state_reg);
return kDifOk;
}
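// Usage sketch (not part of the generated code): a typical service path pairs
// dif_dma_irq_is_pending() with dif_dma_irq_acknowledge() so the handler only
// clears the write-one-clear INTR_STATE bit it actually handled. `dma` is
// assumed to be an initialized handle.
//
//   bool pending = false;
//   dif_result_t res =
//       dif_dma_irq_is_pending(&dma, kDifDmaIrqWriterDone, &pending);
//   if (res == kDifOk && pending) {
//     // ... service the "writer done" event ...
//     (void)dif_dma_irq_acknowledge(&dma, kDifDmaIrqWriterDone);
//   }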
OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_get_enabled(const dif_dma_t *dma, dif_dma_irq_t irq,
dif_toggle_t *state) {
if (dma == NULL || state == NULL) {
return kDifBadArg;
}
bitfield_bit32_index_t index;
if (!dma_get_irq_bit_index(irq, &index)) {
return kDifBadArg;
}
uint32_t intr_enable_reg =
mmio_region_read32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET);
bool is_enabled = bitfield_bit32_read(intr_enable_reg, index);
*state = is_enabled ? kDifToggleEnabled : kDifToggleDisabled;
return kDifOk;
}

OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_set_enabled(const dif_dma_t *dma, dif_dma_irq_t irq,
dif_toggle_t state) {
if (dma == NULL) {
return kDifBadArg;
}
bitfield_bit32_index_t index;
if (!dma_get_irq_bit_index(irq, &index)) {
return kDifBadArg;
}
uint32_t intr_enable_reg =
mmio_region_read32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET);
bool enable_bit = (state == kDifToggleEnabled) ? true : false;
intr_enable_reg = bitfield_bit32_write(intr_enable_reg, index, enable_bit);
mmio_region_write32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET,
intr_enable_reg);
return kDifOk;
}
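// Usage sketch (not part of the generated code): reading the current enable
// state with dif_dma_irq_get_enabled() and unmasking the interrupt only if it
// is currently disabled. `dma` is assumed to be an initialized handle.
//
//   dif_toggle_t state;
//   dif_result_t res =
//       dif_dma_irq_get_enabled(&dma, kDifDmaIrqWriterDone, &state);
//   if (res == kDifOk && state == kDifToggleDisabled) {
//     (void)dif_dma_irq_set_enabled(&dma, kDifDmaIrqWriterDone,
//                                   kDifToggleEnabled);
//   }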
OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_disable_all(const dif_dma_t *dma,
dif_dma_irq_enable_snapshot_t *snapshot) {
if (dma == NULL) {
return kDifBadArg;
}
// Pass the current interrupt state to the caller, if requested.
if (snapshot != NULL) {
*snapshot = mmio_region_read32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET);
}
// Disable all interrupts.
mmio_region_write32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET, 0u);
return kDifOk;
}

OT_WARN_UNUSED_RESULT
dif_result_t dif_dma_irq_restore_all(
    const dif_dma_t *dma, const dif_dma_irq_enable_snapshot_t *snapshot) {
if (dma == NULL || snapshot == NULL) {
return kDifBadArg;
}
mmio_region_write32(dma->base_addr, DMA_INTR_ENABLE_REG_OFFSET, *snapshot);
return kDifOk;
}
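// Usage sketch (not part of the generated code): disable_all / restore_all
// can bracket a region that must run with all DMA interrupts masked; the
// snapshot taken on entry restores the previous INTR_ENABLE value on exit.
// `dma` is assumed to be an initialized handle.
//
//   dif_dma_irq_enable_snapshot_t snapshot;
//   if (dif_dma_irq_disable_all(&dma, &snapshot) == kDifOk) {
//     // ... code that must not be interrupted by the DMA IP ...
//     (void)dif_dma_irq_restore_all(&dma, &snapshot);
//   }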