Lines Matching refs:addOperationInternal

279 uint32_t Model::addOperationInternal(op_type op, hexagon_nn_padding_type pad, … in addOperationInternal() function in android::hardware::neuralnetworks::V1_0::implementation::hexagon::Model
326 uint32_t node = addOperationInternal(op, pad, inputs, outs); in addBasicOperation()
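The signature at line 279 is cut off by the listing, and addBasicOperation at line 326 simply forwards its arguments to it. As a rough mental model only, here is a minimal, self-contained sketch of what such a graph-builder method could look like; the enum values, structs, and member names below are simplified stand-ins for illustration, not the actual Hexagon NN HAL definitions:

    #include <cstdint>
    #include <vector>

    // Simplified stand-ins for the hexagon_nn types referenced in the matches above;
    // the real Hexagon NN headers define many more ops and richer output descriptors.
    enum op_type { OP_INPUT, OP_BiasAdd_f, OP_Requantize_32to8, OP_Quantize };
    enum hexagon_nn_padding_type { NN_PAD_NA };
    struct hexagon_nn_input  { uint32_t src_id; uint32_t output_idx; };
    struct hexagon_nn_output { uint32_t rank; uint32_t max_sizes[8]; uint32_t elementsize; };

    class Model {
       public:
        // Appends one node to the in-memory graph and returns its node id, so that
        // callers can wire later nodes to this node's outputs.
        uint32_t addOperationInternal(op_type op, hexagon_nn_padding_type pad,
                                      const std::vector<hexagon_nn_input>& inputs,
                                      const std::vector<hexagon_nn_output>& outputs) {
            uint32_t node = mNextNode++;
            mNodes.push_back({node, op, pad, inputs, outputs});
            return node;
        }

       private:
        struct Node {
            uint32_t id;
            op_type op;
            hexagon_nn_padding_type pad;
            std::vector<hexagon_nn_input> inputs;
            std::vector<hexagon_nn_output> outputs;
        };
        uint32_t mNextNode = 1;
        std::vector<Node> mNodes;
    };

Every caller in the matches below relies on the returned node id to connect the next node's inputs to this node's outputs.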
359 uint32_t node = addOperationInternal(op, pad, inputs, outs); in addFloatOperationWithActivation()
364 node = addOperationInternal(activation, NN_PAD_NA, buffer_in, outs); in addFloatOperationWithActivation()
377 uint32_t node = addOperationInternal(op, pad, inputs, outs); in addQuant8OperationWithActivation()
384 node = addOperationInternal(activation, NN_PAD_NA, buffer_in, outs); in addQuant8OperationWithActivation()
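Lines 359/364 and 377/384 show the same two-step pattern in both activation wrappers: emit the main op, then emit a separate activation node that consumes output 0 of the first node. A hedged illustration of that pattern, reusing the stand-in Model from the sketch above (the free-function form, the helper name, and the single-output wiring are assumptions, not the HAL's code):

    // Hypothetical helper mirroring addFloatOperationWithActivation /
    // addQuant8OperationWithActivation: main op first, activation as its own node.
    uint32_t addOperationWithActivation(Model& model, op_type op, hexagon_nn_padding_type pad,
                                        const std::vector<hexagon_nn_input>& inputs,
                                        const std::vector<hexagon_nn_output>& outs,
                                        op_type activation) {
        uint32_t node = model.addOperationInternal(op, pad, inputs, outs);

        // buffer_in feeds output 0 of the op node into the standalone activation node.
        std::vector<hexagon_nn_input> buffer_in = {{node, 0}};
        node = model.addOperationInternal(activation, NN_PAD_NA, buffer_in, outs);
        return node;
    }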
399 node = addOperationInternal(op, pad, inputs, outs); in addFusedFloatOperation()
404 node = addOperationInternal(OP_BiasAdd_f, NN_PAD_NA, {buffer1_in, bias}, outs); in addFusedFloatOperation()
411 node = addOperationInternal(activation, NN_PAD_NA, buffer2_in, outs); in addFusedFloatOperation()
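Lines 399, 404, and 411 show the fused float path as a chain of up to three nodes: the main op, an OP_BiasAdd_f node, and the activation. A sketch of that chain on the same stand-in Model (the parameter list is an assumption, and the real method also has to handle a missing bias or activation):

    // Hypothetical helper mirroring addFusedFloatOperation:
    // main op -> OP_BiasAdd_f -> activation, each as a separate graph node.
    uint32_t addFusedFloatOperationSketch(Model& model, op_type op, hexagon_nn_padding_type pad,
                                          const std::vector<hexagon_nn_input>& inputs,
                                          const hexagon_nn_input& bias,
                                          const std::vector<hexagon_nn_output>& outs,
                                          op_type activation) {
        uint32_t node = model.addOperationInternal(op, pad, inputs, outs);

        // Bias is applied by its own node that reads output 0 of the main op.
        hexagon_nn_input buffer1_in = {node, 0};
        node = model.addOperationInternal(OP_BiasAdd_f, NN_PAD_NA, {buffer1_in, bias}, outs);

        // The activation node reads the bias-add result.
        std::vector<hexagon_nn_input> buffer2_in = {{node, 0}};
        node = model.addOperationInternal(activation, NN_PAD_NA, buffer2_in, outs);
        return node;
    }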
438 node = addOperationInternal(op, pad, inputs, out32); in addFusedQuant8Operation()
446 node = addOperationInternal( in addFusedQuant8Operation()
456 node = addOperationInternal(OP_Requantize_32to8, NN_PAD_NA, in addFusedQuant8Operation()
467 node = addOperationInternal(activation, NN_PAD_NA, buffer, out8); in addFusedQuant8Operation()
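Lines 438, 456, and 467 show the quantized counterpart: the main op writes 32-bit results (out32), OP_Requantize_32to8 narrows them back to 8 bits, and the activation runs on the 8-bit tensors (out8). The call at line 446 is truncated in the listing and is left out below, and the real requantize step most likely also wires in quantization min/max inputs that this sketch omits:

    // Hypothetical helper mirroring the visible parts of addFusedQuant8Operation.
    uint32_t addFusedQuant8OperationSketch(Model& model, op_type op, hexagon_nn_padding_type pad,
                                           const std::vector<hexagon_nn_input>& inputs,
                                           const std::vector<hexagon_nn_output>& out32,
                                           const std::vector<hexagon_nn_output>& out8,
                                           op_type activation) {
        // Main op produces 32-bit accumulators.
        uint32_t node = model.addOperationInternal(op, pad, inputs, out32);

        // Narrow the 32-bit intermediate back down to 8 bits.
        std::vector<hexagon_nn_input> buffer = {{node, 0}};
        node = model.addOperationInternal(OP_Requantize_32to8, NN_PAD_NA, buffer, out8);

        // Activation on the 8-bit result.
        buffer = {{node, 0}};
        node = model.addOperationInternal(activation, NN_PAD_NA, buffer, out8);
        return node;
    }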
499 uint32_t node = addOperationInternal(OP_INPUT, NN_PAD_NA, {}, outs); in addInputs()
541 uint32_t dequant = addOperationInternal( in addOutputs()
546 addOperationInternal(OP_Quantize, NN_PAD_NA, in addOutputs()
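Lines 499, 541, and 546 cover the graph boundary: addInputs creates a single OP_INPUT node with no inputs, and addOutputs can insert conversion nodes such as OP_Quantize in front of the real outputs (the dequant call at line 541 is truncated in the listing). A hedged sketch of both patterns on the stand-in Model; the helper names and the producer wiring are assumptions:

    // Model inputs become outputs of one OP_INPUT node that has no inputs of its own.
    uint32_t addInputsSketch(Model& model, const std::vector<hexagon_nn_output>& outs) {
        return model.addOperationInternal(OP_INPUT, NN_PAD_NA, {}, outs);
    }

    // A conversion node (here OP_Quantize, as at line 546) inserted between a
    // producer node and the model output; quantization min/max inputs are omitted.
    uint32_t addQuantizedOutputSketch(Model& model, uint32_t producer,
                                      const std::vector<hexagon_nn_output>& outs) {
        std::vector<hexagon_nn_input> in = {{producer, 0}};
        return model.addOperationInternal(OP_Quantize, NN_PAD_NA, in, outs);
    }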