hls4ml.converters.keras_v3.hgq2 package
Submodules
hls4ml.converters.keras_v3.hgq2.einsum module
hls4ml.converters.keras_v3.hgq2.multi_head_attention module
- class hls4ml.converters.keras_v3.hgq2.multi_head_attention.QLinformerAttentionHandler
Bases: QMultiHeadAttentionHandler
- handle(layer: hgq.layers.linformer_attention.QLinformerAttention, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
- handles = ('hgq.layers.linformer_attention.QLinformerAttention',)
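All handlers in this package follow the same dispatch pattern: the handles tuple lists the fully qualified HGQ2 layer classes a handler accepts, and handle() translates a matched layer and its input/output KerasTensors into hls4ml layer configuration. A minimal sketch of that pattern, in which the base-class import path and the dict return convention are assumptions rather than documented API:

    from collections.abc import Sequence

    # Assumed import path for the shared keras_v3 handler base class.
    from hls4ml.converters.keras_v3._base import KerasV3LayerHandler


    class QExampleHandler(KerasV3LayerHandler):
        # Fully qualified class paths of the layers this handler claims.
        handles = ('hgq.layers.example.QExample',)

        def handle(self, layer, in_tensors: Sequence, out_tensors: Sequence):
            # Hypothetical body: return the hls4ml layer config for `layer`.
            return {'class_name': 'Example', 'name': layer.name}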
hls4ml.converters.keras_v3.hgq2.pooling module
- class hls4ml.converters.keras_v3.hgq2.pooling.QPoolingHandler
Bases: PoolingHandler, QLayerHandler
- handles = ('hgq.layers.pooling.QMaxPooling1D', 'hgq.layers.pooling.QMaxPooling2D', 'hgq.layers.pooling.QMaxPooling3D', 'hgq.layers.pooling.QAveragePooling1D', 'hgq.layers.pooling.QAveragePooling2D', 'hgq.layers.pooling.QAveragePooling3D', 'hgq.layers.pooling.QGlobalAveragePooling1D', 'hgq.layers.pooling.QGlobalAveragePooling2D', 'hgq.layers.pooling.QGlobalAveragePooling3D', 'hgq.layers.pooling.QGlobalMaxPooling1D', 'hgq.layers.pooling.QGlobalMaxPooling2D', 'hgq.layers.pooling.QGlobalMaxPooling3D')
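QPoolingHandler documents no handle() override above, which suggests it reuses PoolingHandler's conversion logic and only mixes in the quantization bookkeeping of QLayerHandler, so an HGQ2 pooling layer converts like its plain Keras counterpart. A minimal end-to-end sketch, assuming HGQ2 re-exports the pooling layers at hgq.layers and that QAveragePooling2D mirrors the keras.layers.AveragePooling2D constructor:

    import keras
    from hgq.layers import QAveragePooling2D  # assumed re-export of hgq.layers.pooling

    import hls4ml

    # Tiny model exercising one of the classes in the `handles` tuple above.
    model = keras.Sequential([
        keras.layers.Input(shape=(8, 8, 3)),
        QAveragePooling2D(pool_size=(2, 2)),
    ])

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir='qpool-prj'
    )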
hls4ml.converters.keras_v3.hgq2.softmax module
- class hls4ml.converters.keras_v3.hgq2.softmax.QSoftmaxHandler
Bases: QLayerHandler
- handle(layer: hgq.layers.QSoftmax, in_tensors: Sequence[KerasTensor], out_tensors: Sequence[KerasTensor])
- handles = ('hgq.layers.softmax.QSoftmax',)
- hls4ml.converters.keras_v3.hgq2.softmax.fixed_quantizer_to_hls4ml_t(q: FixedPointQuantizerBase, take_max=False)
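Taken together, QSoftmaxHandler converts hgq.layers.QSoftmax into hls4ml's softmax implementation, and fixed_quantizer_to_hls4ml_t maps an HGQ2 fixed-point quantizer onto an hls4ml precision type, with take_max presumably collapsing a heterogeneous, per-element quantizer to the single widest covering type. A hedged conversion sketch, assuming QSoftmax accepts a Keras-Softmax-like no-argument constructor:

    import keras
    from hgq.layers import QSoftmax

    import hls4ml

    # A classifier head ending in the quantized softmax handled above.
    model = keras.Sequential([
        keras.layers.Input(shape=(16,)),
        QSoftmax(),
    ])

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir='qsoftmax-prj'
    )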