hls4ml.converters.keras_v3.hgq2 package

Submodules

hls4ml.converters.keras_v3.hgq2.einsum module

class hls4ml.converters.keras_v3.hgq2.einsum.QEinsumHandler

Bases: QLayerHandler

handle(layer: hgq.layers.QEinsum, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
handles = ('hgq.layers.ops.einsum.QEinsum',)

hls4ml.converters.keras_v3.hgq2.multi_head_attention module

class hls4ml.converters.keras_v3.hgq2.multi_head_attention.QMultiHeadAttentionHandler

Bases: QLayerHandler

handle(layer: hgq.layers.QMultiHeadAttention, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
handles = ('hgq.layers.multi_head_attention.QMultiHeadAttention',)

hls4ml.converters.keras_v3.hgq2.softmax module

class hls4ml.converters.keras_v3.hgq2.softmax.QSoftmaxHandler

Bases: QLayerHandler

handle(layer: hgq.layers.QSoftmax, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
handles = ('hgq.layers.softmax.QSoftmax',)

hls4ml.converters.keras_v3.hgq2.softmax.fixed_quantizer_to_hls4ml_t(q: FixedPointQuantizerBase, take_max=False)

hls4ml.converters.keras_v3.hgq2.unary_lut module

class hls4ml.converters.keras_v3.hgq2.unary_lut.QUnaryLUTHandler

Bases: QLayerHandler, KerasV3LayerHandler

handle(layer: hgq.layers.QUnaryFunctionLUT, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
handles = ('hgq.layers.activation.QUnaryFunctionLUT',)

Module contents