blob: 232a81a02a7deb9cd960a673b7383dfa12a6e5d9 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/BackendHelper.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000012#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan9b088d92020-09-14 15:12:55 +010013#include <armnn/utility/NumericCast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010014
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000015#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000016#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010017
Mike Kelly46272802019-08-14 17:00:48 +010018#include "1.0/FullyConnected.hpp"
19
arovir01b0717b52018-09-05 17:03:25 +010020#include <ActivationFunctor.h>
21#include <CpuExecutor.h>
22#include <OperationsUtils.h>
23
James Ward4e22f602020-10-20 15:50:33 +010024#include <armnnUtils/FloatingPointComparison.hpp>
arovir01b0717b52018-09-05 17:03:25 +010025
26#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010027#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010028
Kevin DuBoisa2cb5482020-08-26 13:41:12 -070029#ifdef __clang__
30#pragma clang diagnostic push
31#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
32#pragma clang diagnostic ignored "-Wunused-function"
33#pragma clang diagnostic ignored "-Wunused-variable"
34#endif
arovir01b0717b52018-09-05 17:03:25 +010035namespace armnn_driver
36{
37
38///
39/// Helper classes
40///
41
Kevin Mayec1e5b82020-02-26 17:00:39 +000042#ifdef ARMNN_ANDROID_R
Renato Grottesi77a0fb02023-05-08 12:55:03 +000043using OperandType = android::nn::hal::OperandType;
Kevin Mayec1e5b82020-02-26 17:00:39 +000044#endif
45
Renato Grottesi77a0fb02023-05-08 12:55:03 +000046#ifdef ARMNN_ANDROID_S
47#include <nnapi/Types.h>
48#endif
49
50
/// Aggregates the state threaded through the conversion of an NN model into an
/// armnn::INetwork: the candidate backends, the network under construction, the
/// output slot created per operand, and the memory pools backing operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
        , m_DynamicInputsEncountered(false)
    {}

    // Backends to try, in preference order.
    const std::vector<armnn::BackendId> m_Backends;
    // The network being built; starts null and is created by the converter.
    armnn::INetworkPtr m_Network;
    // Indexed by operand index: the armnn output slot that produces that operand.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools the model's constant/runtime operand data lives in.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    // Set when any input with dynamic (unspecified) dimensions was seen.
    bool m_DynamicInputsEncountered;
};
65
/// Lightweight handle pairing an armnn::IOutputSlot with the TensorInfo of the
/// operand it carries; used to wire converted operands into subsequent layers.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True when the handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the underlying output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// Disconnects the underlying output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    // NOTE(review): implementation is out of view — presumably reconciles this
    // handle's quantization scale with the weight and input scales; confirm there.
    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
88
/// Holds (and when swizzling is needed, owns) the data of a constant tensor
/// operand, exposing it as an armnn::ConstTensor. Also models "optional but
/// absent" operands via the optional flag.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data when swizzling is required.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disabled (the pin may own the swizzled data buffer); moving is allowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
123
124} // namespace armnn_driver
125
126///
127/// Utility functions
128///
129
130namespace
131{
132
133using namespace armnn_driver;
134using namespace android::nn;
135
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string consumed by ALOGD; the forwarded
//                  arguments must match its conversion specifiers.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
144
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Parameters:
//   funcName   - caller name used in the log messages (typically __func__)
//   func       - the layer-support member function to invoke (e.g. IsReshapeSupported)
//   backends   - container of armnn::BackendId candidates, tried in order
//   supported  - bool lvalue; left true on the first backend that accepts the layer
//   setBackend - armnn::BackendId lvalue; receives the id of the accepting backend
//   ...        - arguments forwarded to 'func'; an Optional reason string is appended
// Unregistered backends are skipped (with a log line). An InvalidArgumentException
// thrown by the support check is rethrown with added context.
// NOTE: no comments inside the macro body — '//' would swallow the line continuations.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100189
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000190template<typename HalOperand>
191armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100192{
193 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
194}
195
Matthew Bentham912b3622019-05-03 15:49:14 +0100196inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100197{
Matthew Bentham912b3622019-05-03 15:49:14 +0100198 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
199 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
200 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100201}
202
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
220
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    switch (type)
    {
        case V1_3::OperandType::BOOL:
        case V1_3::OperandType::TENSOR_BOOL8:
        case V1_3::OperandType::TENSOR_FLOAT16:
        case V1_3::OperandType::TENSOR_FLOAT32:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
        case V1_3::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
239
// A 1.0 operand is never a BOOL (the BOOL operand type was introduced in NNAPI 1.2).
inline bool IsBool(V1_0::Operand)
{
    return false;
}
244
// A 1.0 operand never originates from a 1.2-or-later model.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
249
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when the 1.2 operand holds a scalar boolean.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
264
#ifdef ARMNN_ANDROID_NN_V1_3

// True when the 1.3 operand holds a scalar boolean.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
279
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100280template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000281armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
282 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100283 armnn::TensorInfo reshapeInfo)
284{
285 armnn::ReshapeDescriptor reshapeDescriptor;
286 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
287
288 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000289 if (!reshapeLayer)
290 {
291 throw armnn::RuntimeException("ReshapeLayer is null");
292 }
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100293
294 // Attach the input layer to the reshape layer
295 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
296 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
297
298 return *reshapeLayer;
299}
300
/// Connects 'input0' and 'input1' to the two input slots of 'startLayer'.
/// When the inputs differ in rank, a Reshape layer is inserted in front of the
/// lower-rank input to prepend degenerate (size 1) dimensions so both inputs end
/// up with the same rank, keeping the input order intact.
/// @return false when the required Reshape is not supported by any backend.
/// @throws armnn::RuntimeException when 'startLayer' or data.m_Network is null.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the tail of an all-ones shape of the larger rank.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    // Pin the reshape to the backend that reported support for it.
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
403
// Computes explicit head/tail padding for one spatial dimension from an Android NN
// implicit padding scheme.
// @param input      Input extent along the dimension.
// @param kernel     Kernel extent along the dimension.
// @param stride     Stride along the dimension.
// @param outPadHead [out] Padding before the data.
// @param outPadTail [out] Padding after the data.
// @param scheme     Android NN implicit padding scheme.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // numeric_cast validates the int32 -> uint32 conversion of the computed padding.
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
417
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// As CalcPadding above, but also accounts for kernel dilation (available from NNAPI 1.2).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

// Computes explicit padding for transpose convolution. Unlike CalcPadding above,
// the outputs remain signed (int32_t) and are passed straight through.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
437
Matthew Bentham912b3622019-05-03 15:49:14 +0100438Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100439{
440 Shape shape;
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000441 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100442 shape.dimensions = operand.dimensions;
443 shape.scale = operand.scale;
444 shape.offset = operand.zeroPoint;
445 return shape;
446}
447
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Builds an android::nn::Shape (type, dimensions, quantization params) for a 1.2 operand.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    result.type       = OperandType(operand.type);
    return result;
}

#endif
461
#ifdef ARMNN_ANDROID_NN_V1_3

// Builds an android::nn::Shape (type, dimensions, quantization params) for a 1.3 operand.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape result;
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    result.type       = OperandType(operand.type);
    return result;
}

#endif
475
arovir01b0717b52018-09-05 17:03:25 +0100476// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
477// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100478// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
479// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100480void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000481 const armnn::TensorInfo& weightInfo,
482 const armnn::TensorInfo& inputInfo)
arovir01b0717b52018-09-05 17:03:25 +0100483{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000484 if (weightInfo.HasPerAxisQuantization())
arovir01b0717b52018-09-05 17:03:25 +0100485 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000486 // NOTE: Bias scale is always set to 0 for per-axis quantization and
487 // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
488 auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
arovir01b0717b52018-09-05 17:03:25 +0100489 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000490 return biasScale * inputInfo.GetQuantizationScale();
491 };
492
493 std::vector<float> biasScales(weightInfo.GetQuantizationScales());
494 std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
495
496 biasInfo.SetQuantizationScales(biasScales);
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000497 // bias is expected to be a 1d tensor, set qdim=0
498 biasInfo.SetQuantizationDim(0);
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000499
500 ALOGV("Bias quantization params have been updated for per-axis quantization");
501 }
502 else
503 {
504 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
505 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
506 {
James Ward4e22f602020-10-20 15:50:33 +0100507 if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000508 {
509 ALOGW("Bias quantization scale has been modified to match input * weights");
510 biasInfo.SetQuantizationScale(expectedBiasScale);
511 }
arovir01b0717b52018-09-05 17:03:25 +0100512 }
513 }
514}
515
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// Swaps the last two dimensions of a 4D tensor (used to move a concat axis).
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors (cyclic rotations; the two are inverses of each other)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100524
525template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000526armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
527 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100528{
529 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000530 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000531 if (!layer)
532 {
533 throw armnn::RuntimeException("TransposeLayer is null");
534 }
arovir01b0717b52018-09-05 17:03:25 +0100535 // Connect input to swizzle layer
536 input.Connect(layer->GetInputSlot(0));
537
538 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000539 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100540 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
541
542 return *layer;
543}
544
arovir01b0717b52018-09-05 17:03:25 +0100545bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
546 const armnn::TensorShape & outputShape,
547 uint32_t concatDim)
548{
549 // Validate the output shape is correct given the input shapes (which have just been validated)
550 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
551 if (outputShape.GetNumDimensions() != numDimensions)
552 {
553 return Fail("%s: Output shape has wrong number of dimensions", __func__);
554 }
555
556 unsigned int outputSizeAlongConcatenatedDimension = 0;
557 for (unsigned int i = 0; i < inputShapes.size(); i++)
558 {
559 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
560 }
561
562 for (unsigned int i = 0; i < numDimensions; ++i)
563 {
564 if (i == concatDim)
565 {
566 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
567 {
568 return Fail(
569 "%s: Invalid output shape for dimension %d (%d != %d)",
570 __func__,
571 i,
572 outputShape[i],
573 outputSizeAlongConcatenatedDimension);
574 }
575 }
576 else
577 {
578 if (outputShape[i] != inputShapes[0][i])
579 {
580 return Fail("%s: Invalid output shape", __func__);
581 }
582 }
583 }
584
585 return true;
586}
587
// Tensors with fewer than 3 dimensions must be reshaped before concatenation.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
592
arovir01b0717b52018-09-05 17:03:25 +0100593void SwizzleInputs(armnn::INetwork& network,
594 std::vector<LayerInputHandle>& inputs,
595 std::vector<armnn::TensorShape>& inputShapes,
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000596 const armnn::PermutationVector& mapping,
597 std::vector<armnn::BackendId>& setBackends)
arovir01b0717b52018-09-05 17:03:25 +0100598{
599 if (!mapping.IsEqual(IdentityPermutation4D))
600 {
601 size_t nInputs = inputs.size();
602 for (size_t i=0; i<nInputs; ++i)
603 {
604 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000605 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000606 swizzleLayer.SetBackendId(setBackends[i]);
arovir01b0717b52018-09-05 17:03:25 +0100607 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
608 auto& outputInfo = outputSlot.GetTensorInfo();
609 // replace inputs with the swizzled ones
610 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
611 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
612 }
613 }
614}
615
/// For a non-identity 'mapping', checks that Transpose is supported for every input
/// (recording the accepting backend per input) and then swizzles the inputs via
/// SwizzleInputs. Identity permutations (3D or 4D) are a no-op.
/// @return false as soon as one input's Transpose is unsupported, true otherwise.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        // One backend id per input, consumed by SwizzleInputs below.
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}
655
656
// Decides whether concat inputs must be permuted so the concat axis lands on a
// dimension ArmNN can split with Compute Library subtensors; on permutation,
// 'concatDimension' is rewritten in place to the post-permute axis and
// 'permutationPair' receives the (forward, inverse) permutations to wrap the concat.
// @return true when the caller must insert permute layers; false otherwise
//         (also false, via Fail, when numberOfDimensions < 3).
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        // Swap dims 2 and 3 so the concat happens on the last dimension instead.
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        // Rotate left so the concat happens on dimension 0; rotate right restores order.
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
691
692} // anonymous namespace
693
694namespace armnn_driver
695{
696
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
707
708} // namespace armnn_driver
709
710///
711/// Utility templates
712///
713
714namespace armnn_driver
715{
716
717using namespace android::nn;
718
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100719template<typename HalPolicy,
720 typename HalOperand = typename HalPolicy::Operand,
721 typename HalOperation = typename HalPolicy::Operation,
722 typename HalModel = typename HalPolicy::Model>
723const HalOperand* GetInputOperand(const HalOperation& operation,
724 uint32_t inputIndex,
725 const HalModel& model,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100726 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100727{
728 if (inputIndex >= operation.inputs.size())
729 {
saoste01b8471482018-10-10 09:44:51 +0100730 if (failOnIndexOutOfBounds)
731 {
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000732 Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
saoste01b8471482018-10-10 09:44:51 +0100733 }
arovir01b0717b52018-09-05 17:03:25 +0100734 return nullptr;
735 }
736
Kevin May42477c12020-03-26 13:34:14 +0000737 // Model should have been validated beforehand
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000738 if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
739 {
740 Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
741 return nullptr;
742 }
743
Kevin May42477c12020-03-26 13:34:14 +0000744 return &getMainModel(model).operands[operation.inputs[inputIndex]];
arovir01b0717b52018-09-05 17:03:25 +0100745}
746
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100747template<typename HalPolicy,
748 typename HalOperand = typename HalPolicy::Operand,
749 typename HalOperation = typename HalPolicy::Operation,
750 typename HalModel = typename HalPolicy::Model>
751const HalOperand* GetOutputOperand(const HalOperation& operation,
752 uint32_t outputIndex,
753 const HalModel& model)
arovir01b0717b52018-09-05 17:03:25 +0100754{
755 if (outputIndex >= operation.outputs.size())
756 {
757 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
758 return nullptr;
759 }
760
761 // Model should have been validated beforehand
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000762 if (operation.inputs[outputIndex] >= getMainModel(model).operands.size())
763 {
764 Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
765 return nullptr;
766 }
Kevin May42477c12020-03-26 13:34:14 +0000767 return &getMainModel(model).operands[operation.outputs[outputIndex]];
arovir01b0717b52018-09-05 17:03:25 +0100768}
769
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100770template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +0100771 typename HalOperand = typename HalPolicy::Operand,
772 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100773const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100774 const HalModel& model,
775 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000776 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100777{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100778 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100779
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100780 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100781 switch (operand.lifetime)
782 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100783 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100784 {
785 // Constant found in model.operandValues
786 valueStart = &model.operandValues[operand.location.offset];
787 break;
788 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100789 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100790 {
791 // Constant specified via a Memory object
792 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
793 break;
794 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100795 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000796 {
797 // An optional input tensor with no values is not an error so should not register as a fail
798 if (optional)
799 {
800 valueStart = nullptr;
801 break;
802 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100803 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000804 }
arovir01b0717b52018-09-05 17:03:25 +0100805 default:
806 {
807 // Unsupported/invalid (e.g. can't get value of an input to the model)
808 Fail("%s: unsupported/invalid operand lifetime: %s",
809 __func__, toString(operand.lifetime).c_str());
810 valueStart = nullptr;
811 }
812 }
813
814 return valueStart;
815}
816
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100817template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100818 typename HalOperation = typename HalPolicy::Operation,
819 typename HalModel = typename HalPolicy::Model,
820 typename HalOperandType = typename HalPolicy::OperandType>
821bool GetOperandType(const HalOperation& operation,
822 uint32_t inputIndex,
823 const HalModel& model,
824 HalOperandType& type)
825{
826 using HalOperand = typename HalPolicy::Operand;
827
828 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
829 if (!operand)
830 {
831 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
832 }
833
834 type = operand->type;
835 return true;
836}
837
/// Returns true when the operand's value is fixed at model-build time.
/// NO_VALUE counts as "constant" here: an absent optional operand can never
/// be produced at runtime either.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
850
851template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100852 typename HalOperand = typename HalPolicy::Operand,
853 typename HalModel = typename HalPolicy::Model>
854ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
855 const HalModel& model,
856 const ConversionData& data,
857 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
858 const armnn::TensorShape* overrideTensorShape = nullptr,
859 bool optional = false)
860{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100861 if (!IsOperandTypeSupportedForTensors(operand.type))
862 {
863 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
864 return ConstTensorPin();
865 }
866
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000867 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100868 {
869 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
870 return ConstTensorPin();
871 }
872
873 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
874 if (!valueStart)
875 {
876 if (optional)
877 {
878 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
879 return ConstTensorPin(true);
880 }
881 // mandatory tensor with no values
882 Fail("%s: failed to get operand address", __func__);
883 return ConstTensorPin();
884 }
885
886 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Renato Grottesi77a0fb02023-05-08 12:55:03 +0000887
888 // Make sure isConstant flag is set.
889 tensorInfo.SetConstant();
Teresa Charlin02dce092019-11-11 17:06:23 +0000890
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100891 if (overrideTensorShape != nullptr)
892 {
893 tensorInfo.SetShape(*overrideTensorShape);
894 }
895 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
896}
897
898template<typename HalPolicy,
899 typename HalOperation = typename HalPolicy::Operation,
900 typename HalModel = typename HalPolicy::Model>
901ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
902 uint32_t inputIndex,
903 const HalModel& model,
904 const ConversionData& data,
905 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
906 const armnn::TensorShape* overrideTensorShape = nullptr,
907 bool optional = false)
908{
909 using HalOperand = typename HalPolicy::Operand;
910
911 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
912 if (!operand)
913 {
914 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
915 return ConstTensorPin();
916 }
917 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
918 model,
919 data,
920 dimensionMappings,
921 overrideTensorShape,
922 optional);
923}
924
925template<typename HalPolicy,
926 typename OutputType,
927 typename HalOperandType = typename HalPolicy::OperandType,
928 typename HalOperation = typename HalPolicy::Operation,
929 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100930bool GetInputScalar(const HalOperation& operation,
931 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100932 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100933 OutputType& outValue,
934 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100935 const ConversionData& data,
936 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100937{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100938 using HalOperand = typename HalPolicy::Operand;
939
940 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100941 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100942 {
943 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
944 }
945
Sadik Armagan813f2302020-05-19 14:10:30 +0100946 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100947 {
948 return Fail("%s: unexpected operand type: %s (should be %s)",
949 __func__, toString(operand->type).c_str(), toString(type).c_str());
950 }
951
Sadik Armagan813f2302020-05-19 14:10:30 +0100952 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100953 {
954 return Fail("%s: incorrect operand location length: %i (should be %i)",
955 __func__, operand->location.length, sizeof(OutputType));
956 }
957
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100959 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100960 {
961 return Fail("%s: failed to get address for operand", __func__);
962 }
963
Sadik Armagan813f2302020-05-19 14:10:30 +0100964 if(!optional)
965 {
966 outValue = *(static_cast<const OutputType*>(valueAddress));
967 }
968
arovir01b0717b52018-09-05 17:03:25 +0100969 return true;
970}
971
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100972template<typename HalPolicy,
973 typename HalOperation = typename HalPolicy::Operation,
974 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100975bool GetInputInt32(const HalOperation& operation,
976 uint32_t inputIndex,
977 int32_t& outValue,
978 const HalModel& model,
979 const ConversionData& data)
980{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100982}
983
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100984template<typename HalPolicy,
985 typename HalOperation = typename HalPolicy::Operation,
986 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100987bool GetInputFloat32(const HalOperation& operation,
988 uint32_t inputIndex,
989 float& outValue,
990 const HalModel& model,
991 const ConversionData& data)
992{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100993 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100994}
995
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996template<typename HalPolicy,
997 typename HalOperation = typename HalPolicy::Operation,
998 typename HalOperandType = typename HalPolicy::OperandType,
999 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001000bool GetInputActivationFunctionImpl(const HalOperation& operation,
1001 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +01001002 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +01001003 ActivationFn& outActivationFunction,
1004 const HalModel& model,
1005 const ConversionData& data)
1006{
Mike Kellyb5fdf382019-06-11 16:35:25 +01001007 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001008 {
1009 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
1010 __func__,
1011 toString(type).c_str(),
Kevin DuBois30c34ae2020-08-26 13:53:41 -07001012 toString(HalOperandType::INT32).c_str(),
1013 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001014 }
1015
1016 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001017 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001018 {
1019 return Fail("%s: failed to get activation input value", __func__);
1020 }
1021 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
1022 return true;
1023}
1024
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001025template<typename HalPolicy,
1026 typename HalOperation = typename HalPolicy::Operation,
1027 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001028bool GetInputActivationFunction(const HalOperation& operation,
1029 uint32_t inputIndex,
1030 ActivationFn& outActivationFunction,
1031 const HalModel& model,
1032 const ConversionData& data)
1033{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001034 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1035 inputIndex,
1036 HalPolicy::OperandType::INT32,
1037 outActivationFunction,
1038 model,
1039 data);
arovir01b0717b52018-09-05 17:03:25 +01001040}
1041
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042template<typename HalPolicy,
1043 typename HalOperation = typename HalPolicy::Operation,
1044 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001045bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
1046 uint32_t inputIndex,
1047 ActivationFn& outActivationFunction,
1048 const HalModel& model,
1049 const ConversionData& data)
1050{
1051 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001052 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1053 inputIndex,
1054 HalPolicy::OperandType::INT32,
1055 outActivationFunction,
1056 model,
1057 data);
arovir01b0717b52018-09-05 17:03:25 +01001058}
1059
1060
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001061template<typename HalPolicy,
1062 typename HalOperation = typename HalPolicy::Operation,
1063 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001064bool GetOptionalInputActivation(const HalOperation& operation,
1065 uint32_t inputIndex,
1066 ActivationFn& activationFunction,
1067 const HalModel& model,
1068 const ConversionData& data)
1069{
1070 if (operation.inputs.size() <= inputIndex)
1071 {
1072 activationFunction = ActivationFn::kActivationNone;
1073 }
1074 else
1075 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001076 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001077 {
1078 return Fail("%s: Operation has invalid inputs", __func__);
1079 }
1080 }
1081 return true;
1082}
1083
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001084template<typename HalPolicy,
1085 typename ConvolutionDescriptor,
1086 typename HalOperation = typename HalPolicy::Operation,
1087 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001088bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1089 uint32_t dilationXIndex,
1090 ConvolutionDescriptor& descriptor,
1091 const HalModel& model,
1092 const ConversionData& data)
1093{
1094 bool success = true;
1095 if (operation.inputs.size() >= dilationXIndex + 2)
1096 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 success &= GetInputScalar<HalPolicy>(operation,
1098 dilationXIndex,
1099 HalPolicy::OperandType::INT32,
1100 descriptor.m_DilationX,
1101 model,
1102 data);
1103 success &= GetInputScalar<HalPolicy>(operation,
1104 dilationXIndex + 1,
1105 HalPolicy::OperandType::INT32,
1106 descriptor.m_DilationY,
1107 model,
1108 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001109 }
1110
1111 return success;
1112}
1113
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001114template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001115 typename HalOperation = typename HalPolicy::Operation,
1116 typename HalModel = typename HalPolicy::Model>
1117bool GetOptionalBool(const HalOperation& operation,
1118 uint32_t inputIndex,
1119 const HalModel& model,
1120 const ConversionData& data)
1121{
1122 using HalOperand = typename HalPolicy::Operand;
1123
1124 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1125 if (!operand)
1126 {
1127 return false;
1128 }
1129
1130 if (!IsBool(*operand))
1131 {
1132 return false;
1133 }
1134
1135 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1136 if (!valueAddress)
1137 {
1138 return false;
1139 }
1140
1141 if (*(static_cast<const bool*>(valueAddress)))
1142 {
1143 return true;
1144 }
1145 else
1146 {
1147 return false;
1148 }
1149}
1150
1151template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001152 typename HalOperand = typename HalPolicy::Operand,
1153 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001154bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001155 std::vector<int32_t>& outValues,
1156 const HalModel& model,
1157 const ConversionData& data)
1158{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001159 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001160 {
1161 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1162 }
1163
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001164 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001165 if (!startAddress)
1166 {
1167 return Fail("%s: failed to get operand address", __func__, operand.type);
1168 }
1169
1170 // Check number of bytes is sensible
1171 const uint32_t numBytes = operand.location.length;
1172 if (numBytes % sizeof(int32_t) != 0)
1173 {
1174 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1175 __func__, numBytes, sizeof(int32_t));
1176 }
1177
1178 outValues.resize(numBytes / sizeof(int32_t));
1179 memcpy(outValues.data(), startAddress, numBytes);
1180 return true;
1181}
1182
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001183template<typename HalPolicy,
1184 typename HalOperation = typename HalPolicy::Operation,
1185 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001186bool GetInputPaddingScheme(const HalOperation& operation,
1187 uint32_t inputIndex,
1188 PaddingScheme& outPaddingScheme,
1189 const HalModel& model,
1190 const ConversionData& data)
1191{
1192 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001193 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001194 {
1195 return Fail("%s: failed to get padding scheme input value", __func__);
1196 }
1197
1198 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1199 return true;
1200}
1201
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001202template<typename HalPolicy,
1203 typename HalOperation = typename HalPolicy::Operation,
1204 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001205LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1206 uint32_t inputIndex,
1207 const HalModel& model,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001208 ConversionData& data,
1209 const armnn::PermutationVector& dimensionMappings = g_DontPermute)
arovir01b0717b52018-09-05 17:03:25 +01001210{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001211 using HalOperand = typename HalPolicy::Operand;
Sadik Armagan44bcc022019-06-18 17:21:36 +01001212 using HalOperandType = typename HalPolicy::OperandType;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001213 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1214
1215 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +01001216 if (!operand)
1217 {
1218 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1219 return LayerInputHandle();
1220 }
1221
1222 if (!IsOperandTypeSupportedForTensors(operand->type))
1223 {
1224 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1225 return LayerInputHandle();
1226 }
1227
Sadik Armagan44bcc022019-06-18 17:21:36 +01001228 try
arovir01b0717b52018-09-05 17:03:25 +01001229 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001230 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001231 if (IsDynamicTensor(operandTensorInfo))
1232 {
1233 Fail("%s: dynamic input tensors are not supported", __func__);
1234 return LayerInputHandle();
1235 }
arovir01b0717b52018-09-05 17:03:25 +01001236
Sadik Armagan44bcc022019-06-18 17:21:36 +01001237 switch (operand->lifetime)
arovir01b0717b52018-09-05 17:03:25 +01001238 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001239 case HalOperandLifeTime::MODEL_INPUT:
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001240 {
1241 // NOTE: We must check whether we can support the input tensor on at least one
1242 // of the provided backends; otherwise we cannot convert the operation
1243 bool isInputSupported = false;
1244 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1245 IsInputSupported,
1246 data.m_Backends,
1247 isInputSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001248 armnn::BackendId(),
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001249 operandTensorInfo);
1250
1251 if (!isInputSupported)
1252 {
1253 Fail("%s: unsupported input tensor", __func__);
1254 return LayerInputHandle();
1255 }
1256
James Ward4e22f602020-10-20 15:50:33 +01001257 [[clang::fallthrough]]; // intentional fallthrough
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001258 }
1259 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001260 case HalOperandLifeTime::MODEL_OUTPUT:
arovir01b0717b52018-09-05 17:03:25 +01001261 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001262 // The tensor is either an operand internal to the model, or a model input.
1263 // It can be associated with an ArmNN output slot for an existing layer.
1264
1265 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1266 const uint32_t operandIndex = operation.inputs[inputIndex];
1267 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001268 }
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001269 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001270 case HalOperandLifeTime::CONSTANT_REFERENCE:
1271 {
1272 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001273 ConstTensorPin tensorPin =
1274 ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1275
Sadik Armagan44bcc022019-06-18 17:21:36 +01001276 if (tensorPin.IsValid())
arovir01b0717b52018-09-05 17:03:25 +01001277 {
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001278 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001279 armnn::BackendId setBackend;
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001280 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1281 IsConstantSupported,
1282 data.m_Backends,
1283 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001284 setBackend,
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001285 tensorPin.GetConstTensor().GetInfo());
Mike Kelly28e3d9f2019-08-07 14:55:04 +01001286 if (!isSupported)
Sadik Armagan44bcc022019-06-18 17:21:36 +01001287 {
1288 return LayerInputHandle();
1289 }
1290
1291 armnn::IConnectableLayer* constantLayer =
1292 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001293 constantLayer->SetBackendId(setBackend);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001294 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001295 armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1296 outputSlot.SetTensorInfo(constantTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001297
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001298 return LayerInputHandle(true, &outputSlot, constantTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001299 }
1300 else
1301 {
1302 Fail("%s: invalid operand tensor", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001303 return LayerInputHandle();
1304 }
arovir01b0717b52018-09-05 17:03:25 +01001305 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001306 default:
arovir01b0717b52018-09-05 17:03:25 +01001307 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001308 // Unsupported lifetime for an input tensor
1309 Fail("%s: unsupported lifetime for input tensor: %s",
1310 __func__, toString(operand->lifetime).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001311 return LayerInputHandle();
1312 }
arovir01b0717b52018-09-05 17:03:25 +01001313 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001314 }
1315 catch (UnsupportedOperand<HalOperandType>& e)
1316 {
1317 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1318 return LayerInputHandle();
arovir01b0717b52018-09-05 17:03:25 +01001319 }
1320}
1321
Kevin May42477c12020-03-26 13:34:14 +00001322
1323#ifdef ARMNN_ANDROID_NN_V1_3
1324template<typename HalPolicy>
1325LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1326 uint32_t inputIndex,
1327 const::android::hardware::neuralnetworks::V1_3::Model& model,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001328 ConversionData& data,
1329 const armnn::PermutationVector& dimensionMappings = g_DontPermute)
Kevin May42477c12020-03-26 13:34:14 +00001330{
1331 using HalOperand = typename HalPolicy::Operand;
1332 using HalOperandType = typename HalPolicy::OperandType;
1333 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1334
1335 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1336 if (!operand)
1337 {
1338 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1339 return LayerInputHandle();
1340 }
1341
1342 if (!IsOperandTypeSupportedForTensors(operand->type))
1343 {
1344 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1345 return LayerInputHandle();
1346 }
1347
1348 try
1349 {
1350 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Finn Williams9a044412020-08-17 19:08:35 +01001351
Kevin May42477c12020-03-26 13:34:14 +00001352 if (IsDynamicTensor(operandTensorInfo))
1353 {
Finn Williams291a16b2020-08-19 22:54:00 +01001354 data.m_DynamicInputsEncountered = true;
1355
Finn Williams9a044412020-08-17 19:08:35 +01001356 const uint32_t operandIndex = operation.inputs[inputIndex];
1357
1358 // Check if the dynamic input tensors have been inferred by one of the previous layers
1359 // If not we can't support them
Finn Williams291a16b2020-08-19 22:54:00 +01001360 if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
Finn Williams9a044412020-08-17 19:08:35 +01001361 {
1362 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1363 }
1364 else
1365 {
1366 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1367 return LayerInputHandle();
1368 }
Kevin May42477c12020-03-26 13:34:14 +00001369 }
1370
1371 switch (operand->lifetime)
1372 {
1373 case HalOperandLifeTime::SUBGRAPH_INPUT:
1374 {
1375 // NOTE: We must check whether we can support the input tensor on at least one
1376 // of the provided backends; otherwise we cannot convert the operation
1377 bool isInputSupported = false;
1378 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1379 IsInputSupported,
1380 data.m_Backends,
1381 isInputSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001382 armnn::BackendId(),
Kevin May42477c12020-03-26 13:34:14 +00001383 operandTensorInfo);
1384
1385 if (!isInputSupported)
1386 {
1387 Fail("%s: unsupported input tensor", __func__);
1388 return LayerInputHandle();
1389 }
1390
James Ward4e22f602020-10-20 15:50:33 +01001391 [[clang::fallthrough]]; // intentional fallthrough
Kevin May42477c12020-03-26 13:34:14 +00001392 }
1393 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1394 case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1395 {
1396 // The tensor is either an operand internal to the model, or a model input.
1397 // It can be associated with an ArmNN output slot for an existing layer.
1398
1399 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1400 const uint32_t operandIndex = operation.inputs[inputIndex];
1401 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1402 }
1403 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1404 case HalOperandLifeTime::CONSTANT_REFERENCE:
1405 {
1406 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001407 ConstTensorPin tensorPin =
1408 ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1409
Kevin May42477c12020-03-26 13:34:14 +00001410 if (tensorPin.IsValid())
1411 {
1412 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001413 armnn::BackendId setBackend;
Kevin May42477c12020-03-26 13:34:14 +00001414 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1415 IsConstantSupported,
1416 data.m_Backends,
1417 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001418 setBackend,
Kevin May42477c12020-03-26 13:34:14 +00001419 tensorPin.GetConstTensor().GetInfo());
1420 if (!isSupported)
1421 {
1422 return LayerInputHandle();
1423 }
1424
1425 armnn::IConnectableLayer* constantLayer =
1426 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001427 constantLayer->SetBackendId(setBackend);
Kevin May42477c12020-03-26 13:34:14 +00001428 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001429 armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1430 outputSlot.SetTensorInfo(constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001431
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001432 return LayerInputHandle(true, &outputSlot, constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001433 }
1434 else
1435 {
1436 Fail("%s: invalid operand tensor", __func__);
1437 return LayerInputHandle();
1438 }
1439 break;
1440 }
1441 default:
1442 {
1443 // Unsupported lifetime for an input tensor
1444 Fail("%s: unsupported lifetime for input tensor: %s",
1445 __func__, toString(operand->lifetime).c_str());
1446 return LayerInputHandle();
1447 }
1448 }
1449 }
1450 catch (UnsupportedOperand<HalOperandType>& e)
1451 {
1452 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1453 return LayerInputHandle();
1454 }
1455}
1456#endif
1457
/// Configures the output slot of @p layer for the given operation output and records it in
/// ConversionData so later operations can connect to it.
///
/// @param operation            HAL operation whose output is being set up.
/// @param operationOutputIndex Index into operation.outputs identifying the HAL output operand.
/// @param layer                The ArmNN layer that produces the output.
/// @param layerOutputIndex     Index of the ArmNN output slot on @p layer to use.
/// @param model                HAL model (used to look up the output operand).
/// @param data                 Conversion state; m_OutputSlotForOperand is updated on success.
/// @param overrideOutputInfo   Optional TensorInfo to use instead of the operand's own info.
/// @param validateFunc         Optional backend-support validation, run after dynamic-shape inference.
/// @param activationFunction   Optional fused activation appended after @p layer.
/// @param inferOutputShapes    Force shape inference/validation even for static output tensors.
/// @return true on success; false if the operand is invalid, inputs are unconnected, or validation fails.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    // Prefer the caller-supplied TensorInfo; otherwise derive it from the HAL operand.
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference,
        // so every input slot must already be connected before we can infer anything.
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if(!isSupported)
        {
            // Validation failed: detach the layer from the graph so it is not left half-wired.
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        // Append the fused activation and track ITS output slot for this operand instead.
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        // NOTE(review): indexing the activation layer with layerOutputIndex assumes the activation
        // layer mirrors the producing layer's slot layout; activation layers normally have a single
        // output slot, so this looks only safe for layerOutputIndex == 0 — confirm with callers.
        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
1539
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001540template<typename HalPolicy,
1541 typename HalOperation = typename HalPolicy::Operation,
1542 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001543armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1544 uint32_t inputIndex,
1545 const HalModel& model,
1546 ConversionData& data)
1547{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001548 using HalOperand = typename HalPolicy::Operand;
1549
1550 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001551 if (!operand)
1552 {
1553 return armnn::DataLayout::NHWC;
1554 }
1555
1556 if (!IsBool(*operand))
1557 {
1558 return armnn::DataLayout::NHWC;
1559 }
1560
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001561 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001562 if (!valueAddress)
1563 {
1564 return armnn::DataLayout::NHWC;
1565 }
1566
1567 if (*(static_cast<const bool*>(valueAddress)))
1568 {
1569 return armnn::DataLayout::NCHW;
1570 }
1571 else
1572 {
1573 return armnn::DataLayout::NHWC;
1574 }
1575}
1576
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001577template<typename HalPolicy,
1578 typename HalOperation = typename HalPolicy::Operation,
1579 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001580bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1581 uint32_t outputIndex,
1582 armnn::IConnectableLayer& layer,
1583 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001584 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001585 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001586 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1587 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001588{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001589 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1590 outputIndex,
1591 layer,
1592 outputIndex,
1593 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001594 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001595 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001596 validateFunc,
1597 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001598}
1599
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001600template<typename HalPolicy,
1601 typename HalOperation = typename HalPolicy::Operation,
1602 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001603bool ConvertToActivation(const HalOperation& operation,
1604 const char* operationName,
1605 const armnn::ActivationDescriptor& activationDesc,
1606 const HalModel& model,
1607 ConversionData& data)
1608{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001609 using HalOperand = typename HalPolicy::Operand;
1610
1611 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001612 if (!input.IsValid())
1613 {
1614 return Fail("%s: Input 0 is invalid", operationName);
1615 }
1616
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001617 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001618 if (!outputOperand)
1619 {
1620 return false;
1621 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001622
1623 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001624
1625 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001626 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001627 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1628 {
1629 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1630 IsActivationSupported,
1631 data.m_Backends,
1632 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001633 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001634 input.GetTensorInfo(),
1635 outInfo,
1636 activationDesc);
1637 };
1638
1639 if(IsDynamicTensor(outInfo))
1640 {
1641 isSupported = AreDynamicTensorsSupported();
1642 }
1643 else
1644 {
1645 validateFunc(outInfo, isSupported);
1646 }
1647
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001648 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001649 {
1650 return false;
1651 }
1652
1653 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001654 layer->SetBackendId(setBackend);
1655 if (!layer)
1656 {
1657 return Fail("%s: Could not add the ActivationLayer", __func__);
1658 }
arovir01b0717b52018-09-05 17:03:25 +01001659 input.Connect(layer->GetInputSlot(0));
1660
Finn Williamsa4983ce2020-07-23 12:55:12 +01001661 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001662}
1663
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001664template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001665 typename HalOperation = typename HalPolicy::Operation,
1666 typename HalModel = typename HalPolicy::Model>
1667bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1668{
1669 armnn::ActivationDescriptor desc;
1670 desc.m_Function = armnn::ActivationFunction::ReLu;
1671
1672 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1673}
1674
1675template<typename HalPolicy,
1676 typename HalOperation = typename HalPolicy::Operation,
1677 typename HalModel = typename HalPolicy::Model>
1678bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1679{
1680 armnn::ActivationDescriptor desc;
1681 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1682 desc.m_A = 1.0f;
1683 desc.m_B = -1.0f;
1684
1685 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1686}
1687
1688template<typename HalPolicy,
1689 typename HalOperation = typename HalPolicy::Operation,
1690 typename HalModel = typename HalPolicy::Model>
1691bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1692{
1693 armnn::ActivationDescriptor desc;
1694 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1695 desc.m_A = 6.0f;
1696
1697 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1698}
1699
1700template<typename HalPolicy,
1701 typename HalOperation = typename HalPolicy::Operation,
1702 typename HalModel = typename HalPolicy::Model>
1703bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1704{
1705 armnn::ActivationDescriptor desc;
1706 desc.m_Function = armnn::ActivationFunction::TanH;
1707 desc.m_A = 1.0f; // android nn does not support tanH parameters
1708 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1709
1710 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1711}
1712
1713template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001714 typename HalOperation = typename HalPolicy::Operation,
1715 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001716bool ConvertPaddings(const HalOperation& operation,
1717 const HalModel& model,
1718 ConversionData& data,
1719 unsigned int rank,
1720 armnn::PadDescriptor& padDescriptor)
1721{
1722 using HalOperand = typename HalPolicy::Operand;
1723
1724 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1725 if (!paddingsOperand)
1726 {
1727 return Fail("%s: Could not read paddings operand", __func__);
1728 }
1729
1730 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1731 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1732 {
1733 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1734 }
1735
1736 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001737 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1738 {
1739 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1740 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001741
1742 // add padding for each dimension of input tensor.
1743 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1744 {
1745 int paddingBeforeInput = paddings[i];
1746 int paddingAfterInput = paddings[i + 1];
1747
1748 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1749 {
1750 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1751 }
1752
1753 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1754 }
1755
1756 return true;
1757}
1758
1759template<typename HalPolicy,
1760 typename HalOperation = typename HalPolicy::Operation,
1761 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001762bool ConvertPooling2d(const HalOperation& operation,
1763 const char* operationName,
1764 armnn::PoolingAlgorithm poolType,
1765 const HalModel& model,
1766 ConversionData& data)
1767{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001768 using HalOperand = typename HalPolicy::Operand;
1769 using HalOperandType = typename HalPolicy::OperandType;
1770
1771 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001772 if (!input.IsValid())
1773 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001774 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001775 }
1776
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001777 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001778 if (!output)
1779 {
1780 return Fail("%s: Could not read output 0", __func__);
1781 }
1782
1783 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1784 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1785
arovir01b0717b52018-09-05 17:03:25 +01001786 armnn::Pooling2dDescriptor desc;
1787 desc.m_PoolType = poolType;
1788 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001789 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001790
1791 ActivationFn activation;
1792
Sadik Armagan15d63e22019-07-26 16:59:35 +01001793 auto inputSize = operation.inputs.size();
1794
1795 if (inputSize >= 10)
1796 {
1797 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1798 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1799 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1800 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1801 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1802 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1803 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1804 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1805 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1806 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1807 {
1808 return Fail("%s: Operation has invalid inputs", operationName);
1809 }
1810
Kevin May42477c12020-03-26 13:34:14 +00001811 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001812 {
1813 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1814 }
1815 }
1816 else
arovir01b0717b52018-09-05 17:03:25 +01001817 {
1818 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1819 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001820 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1821 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1822 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1823 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1824 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1825 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001826 {
1827 return Fail("%s: Operation has invalid inputs", operationName);
1828 }
1829
Kevin May42477c12020-03-26 13:34:14 +00001830 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001831 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001832 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001833 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001834
1835 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1836 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1837 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1838
1839 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1840 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001841 }
1842
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001843 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001844 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001845 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1846 {
1847 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1848 IsPooling2dSupported,
1849 data.m_Backends,
1850 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001851 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001852 inputInfo,
1853 outputInfo,
1854 desc);
1855
1856 };
1857
1858 if(IsDynamicTensor(outputInfo))
1859 {
1860 isSupported = AreDynamicTensorsSupported();
1861 }
1862 else
1863 {
1864 validateFunc(outputInfo, isSupported);
1865 }
1866
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001867 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001868 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001869 return false;
arovir01b0717b52018-09-05 17:03:25 +01001870 }
arovir01b0717b52018-09-05 17:03:25 +01001871
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001872 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001873 pooling2dLayer->SetBackendId(setBackend);
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001874 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001875 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001876 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001877 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001878
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001879 input.Connect(pooling2dLayer->GetInputSlot(0));
1880
Finn Williamsa4983ce2020-07-23 12:55:12 +01001881 if (!isSupported)
1882 {
1883 return false;
1884 }
1885
Kevin Mayfcf2a152020-09-08 16:06:32 +01001886 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1887 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001888}
1889
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001890template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001891 typename HalOperation = typename HalPolicy::Operation,
1892 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001893bool ConvertArgMinMax(const HalOperation& operation,
1894 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001895 ConversionData& data,
1896 armnn::ArgMinMaxFunction argMinMaxFunction)
1897{
1898 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1899
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001900 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001901 using HalOperandType = typename HalPolicy::OperandType;
1902
1903 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1904
1905 if (!input0.IsValid())
1906 {
1907 return Fail("%s: Operation has invalid inputs", __func__);
1908 }
1909
1910 int32_t axis;
1911 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1912 {
1913 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1914 }
1915
1916 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1917 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1918
1919 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1920 {
1921 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1922 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1923 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1924 return Fail("%s: Axis must be in range [-n, n)", __func__);
1925 }
1926
1927 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1928 if (!output)
1929 {
1930 return Fail("%s: Could not read output 0", __func__);
1931 }
1932
1933 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1934
1935 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001936
1937 armnn::ArgMinMaxDescriptor descriptor;
1938 descriptor.m_Function = argMinMaxFunction;
1939 descriptor.m_Axis = axis;
1940
1941 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001942 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001943 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1944 {
1945 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1946 IsArgMinMaxSupported,
1947 data.m_Backends,
1948 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001949 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001950 inputInfo0,
1951 outputInfo,
1952 descriptor);
1953 };
1954
1955 if(IsDynamicTensor(outputInfo))
1956 {
1957 isSupported = AreDynamicTensorsSupported();
1958 }
1959 else
1960 {
1961 validateFunc(outputInfo, isSupported);
1962 }
1963
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001964 if (!isSupported)
1965 {
1966 return false;
1967 }
1968
1969 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00001970 layer->SetBackendId(setBackend);
1971 if (!layer)
1972 {
1973 return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
1974 }
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001975 input0.Connect(layer->GetInputSlot(0));
1976
Finn Williamsa4983ce2020-07-23 12:55:12 +01001977 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001978}
1979
1980template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001981 typename HalOperation = typename HalPolicy::Operation,
1982 typename HalModel = typename HalPolicy::Model>
1983bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001984{
Keith Davis6e4081f2020-09-03 13:17:21 +01001985 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001986 using HalOperandType = typename HalPolicy::OperandType;
1987
1988 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1989 if (operation.inputs.size() <= 1)
1990 {
1991 return Fail("%s: Operation has insufficient arguments", __func__);
1992 }
1993
1994 // Get inputs and outputs
1995 const std::size_t numInputTensors = operation.inputs.size() - 1;
1996
1997 int32_t concatDim;
1998 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1999 {
2000 return Fail("%s: Operation has invalid inputs", __func__);
2001 }
2002
2003 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2004 if (!outputOperand)
2005 {
2006 return Fail("%s: Operation has no outputs", __func__);
2007 }
2008
Keith Davis6e4081f2020-09-03 13:17:21 +01002009 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2010 armnn::TensorShape outputShape = outputInfo.GetShape();
2011 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002012 //
2013 // handle negative concat dims along the lines of tensorflow as described here:
2014 // https://www.tensorflow.org/api_docs/python/tf/concat
2015 // "negative axis refers to axis + rank(values)-th dimension"
2016 //
2017 if (concatDim < 0)
2018 {
2019 concatDim += outputShape.GetNumDimensions();
2020 }
2021
2022 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2023 {
2024 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2025 }
2026
2027 std::vector<LayerInputHandle> inputHandles;
2028 std::vector<armnn::TensorShape> inputShapes;
2029
2030 inputHandles.reserve(numInputTensors);
2031 inputShapes.reserve(numInputTensors);
2032
Keith Davis6e4081f2020-09-03 13:17:21 +01002033 bool inputsHaveBeenReshaped = false;
2034 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002035 for (uint32_t i = 0; i < numInputTensors; ++i)
2036 {
2037 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2038 if (!operand)
2039 {
2040 return Fail("%s: Operation has invalid inputs", __func__);
2041 }
2042
Teresa Charlin3b959602019-10-31 17:05:47 +00002043 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2044 if (!operandInputHandle.IsValid())
2045 {
2046 return Fail("%s: Operation has invalid inputs", __func__);
2047 }
Mike Kellyb8805202019-07-31 17:25:43 +01002048
Keith Davis6e4081f2020-09-03 13:17:21 +01002049 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002050 if (operandShape.GetNumDimensions() == 0)
2051 {
2052 return Fail("%s: Operands with rank 0 are not supported", __func__);
2053 }
2054
2055 if (RequiresReshape(operandShape))
2056 {
2057 inputsHaveBeenReshaped = true;
2058
2059 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2060
2061 // Expand the tensor to three dimensions
2062 if (operandShape.GetNumDimensions() == 2)
2063 {
2064 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2065 tensorDimensionsAdded = 1;
2066 }
2067 else
2068 {
2069 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2070 tensorDimensionsAdded = 2;
2071 }
2072
Kevin Mayaed08ac2019-12-12 16:33:31 +00002073 armnn::ReshapeDescriptor reshapeDescriptor;
2074 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2075
2076 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002077 armnn::BackendId setBackendReshape;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002078 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2079 IsReshapeSupported,
2080 data.m_Backends,
2081 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002082 setBackendReshape,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002083 operandInputHandle.GetTensorInfo(),
2084 reshapeInfo,
2085 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002086
Kevin Mayaed08ac2019-12-12 16:33:31 +00002087 if (!isSupported)
2088 {
2089 return false;
2090 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002091 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002092 newReshape.SetBackendId(setBackendReshape);
Mike Kellyb8805202019-07-31 17:25:43 +01002093
2094 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002095 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002096 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2097 }
2098
2099 inputShapes.emplace_back(operandShape);
2100 inputHandles.emplace_back(operandInputHandle);
2101
2102 if (!inputHandles.back().IsValid())
2103 {
2104 return Fail("%s: Operation has invalid inputs", __func__);
2105 }
2106 }
2107
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002108 if (inputShapes.size() != inputHandles.size())
2109 {
2110 return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
2111 inputShapes.size(), inputHandles.size());
2112 }
Mike Kellyb8805202019-07-31 17:25:43 +01002113
2114 if (inputsHaveBeenReshaped)
2115 {
2116 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2117 concatDim += tensorDimensionsAdded;
2118
2119 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2120 if (tensorDimensionsAdded == 1)
2121 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002122 if (IsDynamicTensor(outputInfo))
2123 {
2124 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2125 }
2126 else
2127 {
2128 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2129 }
Mike Kellyb8805202019-07-31 17:25:43 +01002130 }
2131 else if (tensorDimensionsAdded == 2)
2132 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002133 if (IsDynamicTensor(outputInfo))
2134 {
2135 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2136 }
2137 else
2138 {
2139 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2140 }
Mike Kellyb8805202019-07-31 17:25:43 +01002141 }
2142 }
2143
2144 // Check if permutations is required and get the pair of permutations required for the concatenation.
2145 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2146 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002147 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002148 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2149 concatDim,
2150 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002151
Keith Davis6e4081f2020-09-03 13:17:21 +01002152 // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
2153 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002154 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002155 if (needPermute)
2156 {
2157 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2158 }
2159
2160 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002161 }
Mike Kellyb8805202019-07-31 17:25:43 +01002162 // this is no-op for identity swizzles, otherwise it replaces both
2163 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002164 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002165 {
2166 return false;
2167 }
Mike Kellyb8805202019-07-31 17:25:43 +01002168
2169 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2170 armnn::OriginsDescriptor concatDescriptor;
2171
2172 try
2173 {
2174 // The concat descriptor is always created across the only supported concat dimension
2175 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002176 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2177 inputShapes.end(),
2178 concatDim);
2179 } catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002180 {
2181 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2182 }
2183
2184 // Validate the output shape is correct given the input shapes based on the
2185 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002186 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002187 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002188 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2189 {
2190 return Fail("%s: Error validating the output shape for concat", __func__);
2191 }
Mike Kellyb8805202019-07-31 17:25:43 +01002192 }
2193
2194 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2195 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002196 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002197
Keith Davis6e4081f2020-09-03 13:17:21 +01002198 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002199 armnn::BackendId setBackendConcat;
Keith Davis6e4081f2020-09-03 13:17:21 +01002200 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002201 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2202 IsConcatSupported,
2203 data.m_Backends,
2204 isSupported,
2205 setBackendConcat,
2206 inputTensorInfos,
2207 outputInfo,
2208 concatDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002209 };
2210
2211 if (!isDynamicTensor)
2212 {
2213 validateFunc(outputInfo, isSupported);
2214 }
2215 else
2216 {
2217 isSupported = AreDynamicTensorsSupported();
2218 }
2219
Mike Kellyb8805202019-07-31 17:25:43 +01002220 if (!isSupported)
2221 {
2222 return false;
2223 }
2224
2225 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002226 layer->SetBackendId(setBackendConcat);
2227 if (!layer)
2228 {
2229 return Fail("%s: Could not add the ConcatLayer", __func__);
2230 }
Mike Kellyb8805202019-07-31 17:25:43 +01002231 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002232 // Connect inputs to the layer
2233 const int numInputSlots = layer->GetNumInputSlots();
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002234
2235 if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
2236 {
2237 return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
2238 static_cast<std::size_t>(numInputSlots), inputHandles.size());
2239 }
Mike Kellyb8805202019-07-31 17:25:43 +01002240 for (int i = 0; i < numInputSlots; ++i)
2241 {
2242 // connect the input directly to the merge (concat) layer
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002243 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
Mike Kellyb8805202019-07-31 17:25:43 +01002244 }
2245
Keith Davis6e4081f2020-09-03 13:17:21 +01002246 // Transpose the output shape
2247 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002248 armnn::TransposeDescriptor transposeDesc;
2249 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002250 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2251 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2252 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002253 isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002254 armnn::BackendId setBackendTranspose;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002255 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002256 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002257 data.m_Backends,
2258 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002259 setBackendTranspose,
Teresa Charlin185f5882020-04-06 21:59:18 +01002260 inputTransposeInfo,
2261 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002262 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002263 if (!isSupported)
2264 {
2265 return false;
2266 }
Mike Kellyb8805202019-07-31 17:25:43 +01002267 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002268 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002269 permutationPair.second);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002270 deswizzleLayer.SetBackendId(setBackendTranspose);
Mike Kellyb8805202019-07-31 17:25:43 +01002271 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002272
2273 return true;
2274 };
2275
2276 if (needPermute && !isDynamicTensor)
2277 {
2278 transposeOutputShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002279 }
2280
2281 if (inputsHaveBeenReshaped)
2282 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002283 if (isDynamicTensor)
2284 {
2285 // Infer the output shapes of concat if outputs are type 1 dynamic
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002286 if (!layer->GetOutputSlot(0).IsTensorInfoSet())
2287 {
2288 return Fail("%s: TensorInfo is not set", __func__);
2289 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002290 if (!ValidateConcatOutputShape(inputShapes,
2291 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2292 concatDim))
2293 {
2294 return Fail("%s: Error validating the output shape for concat", __func__);
2295 }
2296 transposeOutputShape();
2297 }
Mike Kellyb8805202019-07-31 17:25:43 +01002298
Mike Kellyb8805202019-07-31 17:25:43 +01002299 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002300 // Undo the reshape knowing the amount of dimensions added
2301 if (tensorDimensionsAdded == 1)
2302 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002303 afterConcatInfo.SetShape(
2304 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002305 }
2306 else if (tensorDimensionsAdded == 2)
2307 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002308 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002309 }
2310
Kevin Mayaed08ac2019-12-12 16:33:31 +00002311 armnn::ReshapeDescriptor reshapeDescriptor;
2312 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002313 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002314
Keith Davis6e4081f2020-09-03 13:17:21 +01002315 isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002316 armnn::BackendId setBackendReshape2;
Keith Davis6e4081f2020-09-03 13:17:21 +01002317 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2318 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2319 IsReshapeSupported,
2320 data.m_Backends,
2321 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002322 setBackendReshape2,
Keith Davis6e4081f2020-09-03 13:17:21 +01002323 concatInfo,
2324 afterConcatInfo,
2325 reshapeDescriptor);
2326 };
2327
2328 if (!IsDynamicTensor(afterConcatInfo))
2329 {
2330 validateReshapeFunc(afterConcatInfo, isSupported);
2331 }
2332 else
2333 {
2334 isSupported = AreDynamicTensorsSupported();
2335 }
2336
Kevin Mayaed08ac2019-12-12 16:33:31 +00002337 if (!isSupported)
2338 {
2339 return false;
2340 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002341 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002342 layer->SetBackendId(setBackendReshape2);
Keith Davis6e4081f2020-09-03 13:17:21 +01002343 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2344 0,
2345 *layer,
2346 model,
2347 data,
2348 nullptr,
2349 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002350 }
2351
Keith Davis6e4081f2020-09-03 13:17:21 +01002352 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002353}
2354
2355template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002356 typename HalOperation = typename HalPolicy::Operation,
2357 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002358bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2359{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002360 using HalOperand = typename HalPolicy::Operand;
2361 using HalOperandType = typename HalPolicy::OperandType;
2362
2363 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002364 if (!input.IsValid())
2365 {
2366 return Fail("%s: Operation has invalid inputs", __func__);
2367 }
2368
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002369 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002370 if (!output)
2371 {
2372 return Fail("%s: Could not read output 0", __func__);
2373 }
2374
2375 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002376 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002377
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002378 LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2379 if (!weightsInput.IsValid())
Mike Kellyb5fdf382019-06-11 16:35:25 +01002380 {
2381 return Fail("%s: Operation has invalid inputs", __func__);
2382 }
2383
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002384 LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2385 if (!biasInput.IsValid())
2386 {
2387 return Fail("%s: Operation has invalid inputs", __func__);
2388 }
2389
2390 biasInput.SanitizeQuantizationScale(weightsInput, input);
2391 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2392 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002393
2394 armnn::Convolution2dDescriptor desc;
2395 desc.m_DataLayout = armnn::DataLayout::NHWC;
2396 ActivationFn activation;
2397
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002398 if (operation.inputs.size() == 10)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002399 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002400 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2401 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2402 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2403 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2404 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2405 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002406 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002407 {
2408 return Fail("%s: Operation has invalid inputs", __func__);
2409 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002410 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002411 else if (operation.inputs.size() == 7)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002412 {
2413 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002414 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2415 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2416 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002417 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002418 {
2419 return Fail("%s: Operation has invalid inputs", __func__);
2420 }
2421
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002422 const uint32_t kernelX = weightsInfo.GetShape()[2];
2423 const uint32_t kernelY = weightsInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002424 const uint32_t inputX = inputInfo.GetShape()[2];
2425 const uint32_t inputY = inputInfo.GetShape()[1];
2426
2427 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2428 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002429 }
2430 else
2431 {
2432 return Fail("%s: Unsupported number of operation inputs", __func__);
2433 }
2434
2435 desc.m_BiasEnabled = true;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002436 armnn::Optional<armnn::TensorInfo> biases(biasInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002437
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002438 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002439 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002440 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2441 {
2442 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2443 IsConvolution2dSupported,
2444 data.m_Backends,
2445 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002446 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002447 inputInfo,
2448 outputInfo,
2449 desc,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002450 weightsInfo,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002451 biases);
2452 };
2453
2454 if(!IsDynamicTensor(outputInfo))
2455 {
2456 validateFunc(outputInfo, isSupported);
2457 }
2458 else
2459 {
2460 isSupported = AreDynamicTensorsSupported();
2461 }
2462
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002463 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002464 {
2465 return false;
2466 }
2467
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002468 armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
2469 startLayer->SetBackendId(setBackend);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002470
2471 if (!startLayer)
2472 {
2473 return Fail("%s: AddConvolution2dLayer failed", __func__);
2474 }
2475
Mike Kellyb5fdf382019-06-11 16:35:25 +01002476 input.Connect(startLayer->GetInputSlot(0));
2477
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002478 // Connect weights and bias inputs
2479 weightsInput.Connect(startLayer->GetInputSlot(1));
2480 biasInput.Connect(startLayer->GetInputSlot(2));
2481
Kevin Mayfcf2a152020-09-08 16:06:32 +01002482 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2483 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002484}
2485
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002486template<typename HalPolicy,
2487 typename HalOperation = typename HalPolicy::Operation,
2488 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002489bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2490{
2491 using HalOperand = typename HalPolicy::Operand;
2492 using HalOperandType = typename HalPolicy::OperandType;
2493
2494 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2495 if (!input.IsValid() )
2496 {
2497 return Fail("%s: Operation has invalid inputs", __func__);
2498 }
2499
2500 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2501 unsigned int rank = inputInfo.GetNumDimensions();
2502 if (rank != 4)
2503 {
2504 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2505 }
2506
2507 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2508 if (!output)
2509 {
2510 return Fail("%s: Could not read output 0", __func__);
2511 }
2512
2513 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002514
2515 armnn::DepthToSpaceDescriptor descriptor;
2516
2517 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2518 if (descriptor.m_BlockSize <= 1)
2519 {
2520 return Fail("%s: Block size must be at least 1 in all dimensions");
2521 }
2522
2523 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002524 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002525 {
2526 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2527 }
2528
2529 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002530 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002531 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2532 {
2533 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2534 IsDepthToSpaceSupported,
2535 data.m_Backends,
2536 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002537 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002538 inputInfo,
2539 outputInfo,
2540 descriptor);
2541 };
2542
2543 if(!IsDynamicTensor(outputInfo))
2544 {
2545 validateFunc(outputInfo, isSupported);
2546 }
2547 else
2548 {
2549 isSupported = AreDynamicTensorsSupported();
2550 }
2551
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002552 if (!isSupported)
2553 {
2554 return false;
2555 }
2556
2557 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002558 layer->SetBackendId(setBackend);
2559 if (!layer)
2560 {
2561 return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
2562 }
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002563 input.Connect(layer->GetInputSlot(0));
2564
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002565 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002566}
2567
2568template<typename HalPolicy,
2569 typename HalOperation = typename HalPolicy::Operation,
2570 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002571bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2572{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002573 using HalOperand = typename HalPolicy::Operand;
2574 using HalOperandType = typename HalPolicy::OperandType;
2575
2576 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002577
2578 if (!input.IsValid())
2579 {
2580 return Fail("%s: Operation has invalid inputs", __func__);
2581 }
2582
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002583 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002584
2585 if (!output)
2586 {
2587 return Fail("%s: Could not read output 0", __func__);
2588 }
2589
2590 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002591 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002592
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002593 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002594 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002595 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002596 if (!weightsOperand)
Renato Grottesi1b2abb72023-05-08 06:58:44 +00002597 {
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002598 return Fail("%s: Could not read weights", __func__);
Renato Grottesi1b2abb72023-05-08 06:58:44 +00002599 }
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002600 // Basic sanity check on the weights shape.
2601 // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2602 // [1, filter_height, filter_width, depth_out]
2603 if (weightsOperand->dimensions[0] != 1)
2604 {
2605 return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2606 }
2607
Mike Kellyb5fdf382019-06-11 16:35:25 +01002608 armnn::DepthwiseConvolution2dDescriptor desc;
2609 desc.m_DataLayout = armnn::DataLayout::NHWC;
2610
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002611 LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2612 if (!weightsInput.IsValid())
Mike Kellyb5fdf382019-06-11 16:35:25 +01002613 {
2614 return Fail("%s: Operation has invalid inputs", __func__);
2615 }
2616
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002617 const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2618 if (!biasOperand)
2619 {
2620 return Fail("%s: Could not read bias", __func__);
2621 }
2622
2623 LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2624 if (!biasInput.IsValid())
2625 {
2626 return Fail("%s: Operation has invalid inputs", __func__);
2627 }
2628
2629 biasInput.SanitizeQuantizationScale(weightsInput, input);
2630 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2631 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002632
2633 ActivationFn activation;
2634
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002635 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002636 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002637 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2638 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2639 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2640 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2641 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2642 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002643 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002644 {
2645 return Fail("%s: Operation has invalid inputs", __func__);
2646 }
2647 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002648 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002649 {
2650 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002651 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2652 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2653 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002654 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002655 {
2656 return Fail("%s: Operation has invalid inputs", __func__);
2657 }
2658
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002659 const uint32_t kernelX = weightsInfo.GetShape()[2];
2660 const uint32_t kernelY = weightsInfo.GetShape()[1];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002661 const uint32_t inputX = inputInfo.GetShape()[2];
2662 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002663
2664 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2665 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2666 }
2667 else
2668 {
2669 return Fail("%s: Unsupported number of operation inputs", __func__);
2670 }
2671
2672 desc.m_BiasEnabled = true;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002673 armnn::Optional<armnn::TensorInfo> biases(biasInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002674
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002675 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002676 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002677 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2678 {
2679 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2680 IsDepthwiseConvolutionSupported,
2681 data.m_Backends,
2682 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002683 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002684 inputInfo,
2685 outputInfo,
2686 desc,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002687 weightsInfo,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002688 biases);
2689 };
2690
2691 if(!IsDynamicTensor(outputInfo))
2692 {
2693 validateFunc(outputInfo, isSupported);
2694 }
2695 else
2696 {
2697 isSupported = AreDynamicTensorsSupported();
2698 }
2699
2700
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002701 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002702 {
2703 return false;
2704 }
2705
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002706 armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
2707 startLayer->SetBackendId(setBackend);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002708 if (!startLayer)
2709 {
2710 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2711 }
2712
Mike Kellyb5fdf382019-06-11 16:35:25 +01002713 input.Connect(startLayer->GetInputSlot(0));
2714
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002715 // Connect weights and bias inputs
2716 weightsInput.Connect(startLayer->GetInputSlot(1));
2717 biasInput.Connect(startLayer->GetInputSlot(2));
2718
Kevin Mayfcf2a152020-09-08 16:06:32 +01002719 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2720 data, nullptr, validateFunc, activation);
arovir01b0717b52018-09-05 17:03:25 +01002721}
2722
Mike Kelly3c673942019-07-25 09:26:06 +01002723template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002724 typename HalOperation = typename HalPolicy::Operation,
2725 typename HalModel = typename HalPolicy::Model>
2726bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002727{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002728 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002729
2730 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2731 if (!input.IsValid())
2732 {
2733 return Fail("%s: Operation has invalid input", __func__);
2734 }
2735
Sadik Armagan98c0f662019-11-21 15:54:36 +00002736 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2737 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2738 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2739 {
2740 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2741 }
2742
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002743 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002744 if (!outputOperand)
2745 {
2746 return Fail("%s: Operation has invalid outputs", __func__);
2747 }
2748
2749 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002750
2751 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002752 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002753 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2754 {
2755 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2756 IsDequantizeSupported,
2757 data.m_Backends,
2758 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002759 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002760 inputInfo,
2761 outputInfo);
2762 };
2763
2764 if(IsDynamicTensor(outputInfo))
2765 {
2766 isSupported = AreDynamicTensorsSupported();
2767 }
2768 else
2769 {
2770 validateFunc(outputInfo, isSupported);
2771 }
2772
Mike Kelly46272802019-08-14 17:00:48 +01002773 if (!isSupported)
2774 {
2775 return false;
2776 }
2777
2778 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002779 layer->SetBackendId(setBackend);
2780 if (!layer)
2781 {
2782 return Fail("%s: Could not add the DequantizeLayer", __func__);
2783 }
Mike Kelly46272802019-08-14 17:00:48 +01002784 input.Connect(layer->GetInputSlot(0));
2785
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002786 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002787}
2788
2789template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002790 typename HalOperation = typename HalPolicy::Operation,
2791 typename HalModel = typename HalPolicy::Model>
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002792bool ConvertElementwiseBinary(const HalOperation& operation,
2793 const HalModel& model,
2794 ConversionData& data,
2795 armnn::BinaryOperation binaryOperation)
Mike Kelly46272802019-08-14 17:00:48 +01002796{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002797 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002798
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002799 ALOGV("HalPolicy::ConvertElementwiseBinary()");
2800 ALOGV("binaryOperation = %s", GetBinaryOperationAsCString(binaryOperation));
2801
Mike Kelly46272802019-08-14 17:00:48 +01002802 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2803 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2804
2805 if (!input0.IsValid() || !input1.IsValid())
2806 {
2807 return Fail("%s: Operation has invalid inputs", __func__);
2808 }
2809
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002810 // The FuseActivation parameter is always the input index 2, and it should be optional
Mike Kelly46272802019-08-14 17:00:48 +01002811 ActivationFn activationFunction;
2812 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2813 {
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002814 return Fail("%s: Operation has invalid optional input: activation function", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01002815 }
2816
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002817 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002818 if (!output)
2819 {
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002820 return Fail("%s: Could not read output", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01002821 }
2822
2823 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002824
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002825 armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
2826
Mike Kelly46272802019-08-14 17:00:48 +01002827 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002828 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2829 {
2830 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002831 IsElementwiseBinarySupported,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002832 data.m_Backends,
2833 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002834 armnn::BackendId(),
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002835 input0.GetTensorInfo(),
2836 input1.GetTensorInfo(),
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002837 outputInfo,
2838 binaryOperation);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002839 };
2840
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002841 if (!IsDynamicTensor(outputInfo))
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002842 {
2843 validateFunc(outputInfo, isSupported);
2844 }
2845 else
2846 {
2847 isSupported = AreDynamicTensorsSupported();
2848 }
2849
Mike Kelly46272802019-08-14 17:00:48 +01002850 if (!isSupported)
2851 {
2852 return false;
2853 }
2854
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002855 armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
2856 if (!layer)
2857 {
2858 return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
2859 }
2860 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
Kevin Mayfcf2a152020-09-08 16:06:32 +01002861 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002862 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002863 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002864 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01002865
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002866 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc,
2867 activationFunction);
Renato Grottesi1b2abb72023-05-08 06:58:44 +00002868}
Teresa Charlinee5872d2021-12-03 16:07:42 +00002869
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002870
Mike Kelly46272802019-08-14 17:00:48 +01002871template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002872 typename HalOperation = typename HalPolicy::Operation,
2873 typename HalModel = typename HalPolicy::Model>
2874bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002875{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002876 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002877
2878 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2879 if (!input.IsValid())
2880 {
2881 return Fail("%s: Operation has invalid inputs", __func__);
2882 }
2883
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002884 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002885 if (!outputOperand)
2886 {
2887 return Fail("%s: Operation has invalid outputs", __func__);
2888 }
2889
2890 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002891
2892 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002893 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002894 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2895 {
2896 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2897 IsFloorSupported,
2898 data.m_Backends,
2899 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002900 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002901 input.GetTensorInfo(),
2902 outputInfo);
2903 };
2904
2905 if(!IsDynamicTensor(outputInfo))
2906 {
2907 validateFunc(outputInfo, isSupported);
2908 }
2909 else
2910 {
2911 isSupported = AreDynamicTensorsSupported();
2912 }
2913
Mike Kelly46272802019-08-14 17:00:48 +01002914 if (!isSupported)
2915 {
2916 return false;
2917 }
2918
2919 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
Renato Grottesi77a0fb02023-05-08 12:55:03 +00002920 layer->SetBackendId(setBackend);
2921 if (!layer)
2922 {
2923 return Fail("%s: Could not add the FloorLayer", __func__);
2924 }
Mike Kelly46272802019-08-14 17:00:48 +01002925 input.Connect(layer->GetInputSlot(0));
2926
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002927 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002928}
2929
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand can never be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2934
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// TENSOR_QUANT8_SYMM exists from HAL 1.2 onwards; check the operand's type directly.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2943
#ifdef ARMNN_ANDROID_NN_V1_3

// HAL 1.3 overload: true when the operand is a symmetric 8-bit quantized tensor.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2952
// Outcome of DequantizeIfRequired() below.
enum class DequantizeStatus
{
    SUCCESS,         // Weights were fed by a DEQUANTIZE op and have been dequantized here.
    NOT_REQUIRED,    // Weights are already constant (or no matching DEQUANTIZE was found).
    INVALID_OPERAND  // An operand could not be read; conversion should fail.
};

// <dequantized buffer, buffer size in bytes, TensorInfo describing the buffer, status>
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2961
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002962template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002963 typename HalOperation = typename HalPolicy::Operation,
2964 typename HalModel = typename HalPolicy::Model>
2965DequantizeResult DequantizeIfRequired(size_t operand_index,
2966 const HalOperation& operation,
2967 const HalModel& model,
2968 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002969{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002970 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002971
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002972 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002973 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002974 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002975 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002976 }
2977
2978 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2979 {
2980 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002981 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002982 }
2983
2984 const size_t weightsInputIndex = operation.inputs[operand_index];
2985
2986 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2987 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002988 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002989 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002990 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002991 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002992 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2993 {
2994 continue;
2995 }
2996
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002997 size_t outOpIndex = weightsInputIndex + 1;
2998 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002999 {
3000 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003001 }
3002
3003 if (outOpIndex != weightsInputIndex)
3004 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003005 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003006 }
3007
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003008 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003009
3010 if (!operand)
3011 {
3012 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3013 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003014
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003015 if (!IsQSymm8(*operand))
3016 {
3017 // Only supporting dequantize from QSYMM8 to FLOAT
3018 break;
3019 }
3020
3021 // Allocate a new buffer for the dequantized data and manually dequantize
3022 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3023 if (!startValue)
3024 {
3025 // Failed to get the operand address
3026 break;
3027 }
3028
3029 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
3030 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003031 const float quantizationScale = operand->scale;
3032
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003033 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
3034 for (size_t i = 0; i < dequantizedBufferLength; ++i)
3035 {
3036 float* dstPtr = dequantizedBuffer.get();
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003037
3038 if (!dstPtr)
3039 {
3040 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3041 }
3042 *dstPtr = quantizedBuffer[i] * quantizationScale;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003043 }
3044
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003045 // Construct tensor info for dequantized ConstTensor
3046 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3047 operand->dimensions.data(),
3048 armnn::DataType::Float32);
3049
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003050 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3051 std::move(tensorInfo),
3052 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003053 }
3054
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003055 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003056}
3057
3058template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003059 typename HalOperation = typename HalPolicy::Operation,
3060 typename HalModel = typename HalPolicy::Model>
3061ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3062 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003063 const ConversionData& data,
3064 size_t operandIndex,
3065 bool optional = false)
3066{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003067 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
3068
3069 DequantizeStatus status = std::get<3>(dequantized);
3070 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003071 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003072 case DequantizeStatus::INVALID_OPERAND:
3073 {
3074 // return invalid const tensor pin
3075 return ConstTensorPin();
3076 }
3077 case DequantizeStatus::NOT_REQUIRED:
3078 {
3079 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3080 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3081 }
3082 case DequantizeStatus::SUCCESS:
3083 default:
3084 {
3085 return ConstTensorPin(
3086 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3087 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003088 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003089}
3090
3091
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = data tensor, 1 = weights, 2 = bias (1D), 3 = fused activation.
// Weights/bias may be constant or runtime tensors; both are wired in as layer inputs.
// Inputs with rank > 2 are flattened to 2D through an inserted Reshape layer.
// Returns true on success, false (via Fail) otherwise.
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    // If weights are constant a separate constant layer will be created to store data.
    // Otherwise handle non const weights as inputs.
    weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    // If bias are constant a separate constant layer will be created to store data.
    // Otherwise handle non const bias as inputs.
    biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Flatten the input shape to 2D to match the weights; throws if the shapes are incompatible.
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        // Reject the layer if the declared output shape disagrees with the computed one.
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        // Dynamic output shapes are validated later, once the shape is known.
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
    startLayer->SetBackendId(setBackend);

    // Inputs with rank > 2 are flattened through an intermediate Reshape layer.
    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        if (!reshapeLayer)
        {
            return Fail("%s: could not add the reshapeLayer", __func__);
        }
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
3240
3241template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003242 typename HalOperation = typename HalPolicy::Operation,
3243 typename HalModel = typename HalPolicy::Model>
3244bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003245{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003246 using HalOperand = typename HalPolicy::Operand;
3247
Mike Kelly999e2092019-08-15 10:46:46 +01003248 if (operation.inputs.size() != 1)
3249 {
3250 return Fail("%s: Optional inputs are not supported", __func__);
3251 }
3252
Mike Kelly46272802019-08-14 17:00:48 +01003253 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3254 if (!input.IsValid())
3255 {
3256 return Fail("%s: Operation has invalid inputs", __func__);
3257 }
3258
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003259 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003260 if (!output)
3261 {
3262 return Fail("%s: Could not read output 0", __func__);
3263 }
3264
3265 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3266 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3267
Mike Kelly46272802019-08-14 17:00:48 +01003268 if (outputInfo.GetNumDimensions() != 4u)
3269 {
3270 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3271 }
3272
3273 armnn::L2NormalizationDescriptor desc;
3274 desc.m_DataLayout = armnn::DataLayout::NHWC;
3275
3276 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003277 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003278 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3279 {
3280 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3281 IsL2NormalizationSupported,
3282 data.m_Backends,
3283 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003284 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003285 inputInfo,
3286 outputInfo,
3287 desc);
3288 };
3289
3290 if(!IsDynamicTensor(outputInfo))
3291 {
3292 validateFunc(outputInfo, isSupported);
3293 }
3294 else
3295 {
3296 isSupported = AreDynamicTensorsSupported();
3297 }
3298
Mike Kelly46272802019-08-14 17:00:48 +01003299 if (!isSupported)
3300 {
3301 return false;
3302 }
3303
3304 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003305 layer->SetBackendId(setBackend);
3306 if (!layer)
3307 {
3308 return Fail("%s: Could not add the L2NormalizationLayer", __func__);
3309 }
Mike Kelly46272802019-08-14 17:00:48 +01003310 input.Connect(layer->GetInputSlot(0));
3311
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003312 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003313}
3314
3315template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003316 typename HalOperation = typename HalPolicy::Operation,
3317 typename HalModel = typename HalPolicy::Model>
3318bool ConvertLocalResponseNormalization(const HalOperation& operation,
3319 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003320 ConversionData& data)
3321{
Mike Kelly999e2092019-08-15 10:46:46 +01003322 if (operation.inputs.size() != 5)
3323 {
3324 return Fail("%s: Optional inputs are not supported", __func__);
3325 }
3326
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003327 using HalOperand = typename HalPolicy::Operand;
3328 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003329
3330 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3331 if (!input.IsValid())
3332 {
3333 return Fail("%s: Operation has invalid inputs", __func__);
3334 }
3335
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003336 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003337 if (!output)
3338 {
3339 return Fail("%s: Could not read output 0", __func__);
3340 }
3341
3342 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3343 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3344
Mike Kelly46272802019-08-14 17:00:48 +01003345 if (outputInfo.GetNumDimensions() != 4u)
3346 {
3347 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3348 }
3349
3350 armnn::NormalizationDescriptor descriptor;
3351 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3352 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3353 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3354
3355 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003356 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003357 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3358 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3359 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3360 {
3361 return Fail("%s: Operation has invalid inputs", __func__);
3362 }
3363
3364 // ArmNN expects normSize to be the full size of the normalization
3365 // window rather than the radius as in AndroidNN.
3366 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3367
3368 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003369 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003370 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3371 {
3372 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3373 IsNormalizationSupported,
3374 data.m_Backends,
3375 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003376 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003377 inputInfo,
3378 outputInfo,
3379 descriptor);
3380 };
3381
3382 if(!IsDynamicTensor(outputInfo))
3383 {
3384 validateFunc(outputInfo, isSupported);
3385 }
3386 else
3387 {
3388 isSupported = AreDynamicTensorsSupported();
3389 }
3390
Mike Kelly46272802019-08-14 17:00:48 +01003391 if (!isSupported)
3392 {
3393 return false;
3394 }
3395
Mike Kelly46272802019-08-14 17:00:48 +01003396 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003397 layer->SetBackendId(setBackend);
3398 if (!layer)
3399 {
3400 return Fail("%s: Could not add the NormalizationLayer", __func__);
3401 }
Mike Kelly46272802019-08-14 17:00:48 +01003402 input.Connect(layer->GetInputSlot(0));
3403
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003404 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003405}
3406
3407template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003408 typename HalOperation = typename HalPolicy::Operation,
3409 typename HalModel = typename HalPolicy::Model>
3410bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003411{
Mike Kelly46272802019-08-14 17:00:48 +01003412 armnn::ActivationDescriptor desc;
3413 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3414
3415 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3416}
3417
3418template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003419 typename HalOperation = typename HalPolicy::Operation,
3420 typename HalModel = typename HalPolicy::Model>
3421bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003422{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003423 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003424
3425 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3426 if (!input.IsValid())
3427 {
3428 return Fail("%s: Operation has invalid inputs", __func__);
3429 }
3430
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003431 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003432 if (!output)
3433 {
3434 return Fail("%s: Could not read output 0", __func__);
3435 }
3436
3437 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003438
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003439 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003440 if (!axisOperand)
3441 {
3442 return Fail("%s: Could not read input 1", __func__);
3443 }
3444
3445 std::vector<int32_t> axis;
3446 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3447 {
3448 return Fail("%s: Input 1 has invalid values", __func__);
3449 }
3450
3451 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3452
3453 // Convert the axis to unsigned int and remove duplicates.
3454 unsigned int rank = inputInfo.GetNumDimensions();
3455 std::set<unsigned int> uniqueAxis;
3456 std::transform(axis.begin(), axis.end(),
3457 std::inserter(uniqueAxis, uniqueAxis.begin()),
3458 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3459
3460 // Get the "keep dims" flag.
3461 int32_t keepDims = 0;
3462 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3463 {
3464 return Fail("%s: Could not read input 2", __func__);
3465 }
3466
3467 armnn::MeanDescriptor descriptor;
3468 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3469 descriptor.m_KeepDims = keepDims > 0;
3470
3471 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003472 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003473 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3474 {
3475 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3476 IsMeanSupported,
3477 data.m_Backends,
3478 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003479 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003480 inputInfo,
3481 outputInfo,
3482 descriptor);
3483 };
3484
3485 if(!IsDynamicTensor(outputInfo))
3486 {
3487 validateFunc(outputInfo, isSupported);
3488 }
3489 else
3490 {
3491 isSupported = AreDynamicTensorsSupported();
3492 }
3493
Mike Kelly46272802019-08-14 17:00:48 +01003494 if (!isSupported)
3495 {
3496 return false;
3497 }
3498
3499 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003500 layer->SetBackendId(setBackend);
3501 if (!layer)
3502 {
3503 return Fail("%s: Could not add the MeanLayer", __func__);
3504 }
Mike Kelly46272802019-08-14 17:00:48 +01003505 input.Connect(layer->GetInputSlot(0));
3506
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003507 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003508}
3509
3510template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003511 typename HalOperation = typename HalPolicy::Operation,
3512 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003513bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003514{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003515 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003516
Mike Kelly3c673942019-07-25 09:26:06 +01003517 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3518 if (!input.IsValid())
3519 {
3520 return Fail("%s: Operation has invalid inputs", __func__);
3521 }
3522
3523 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3524 unsigned int rank = inputInfo.GetNumDimensions();
3525
3526 armnn::PadDescriptor descriptor;
3527 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3528 {
3529 return Fail("%s: Could not convert paddings", __func__);
3530 }
3531
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003532 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3533 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003534 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3535 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3536 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003537 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003538 {
3539 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3540 }
3541
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003542 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003543 if (!output)
3544 {
3545 return Fail("%s: Could not read output", __func__);
3546 }
3547
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003548 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003549
3550 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003551 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003552 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3553 {
3554 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3555 IsPadSupported,
3556 data.m_Backends,
3557 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003558 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003559 inputInfo,
3560 outputInfo,
3561 descriptor);
3562 };
3563
3564 if(!IsDynamicTensor(outputInfo))
3565 {
3566 validateFunc(outputInfo, isSupported);
3567 }
3568 else
3569 {
3570 isSupported = AreDynamicTensorsSupported();
3571 }
3572
Mike Kelly3c673942019-07-25 09:26:06 +01003573 if (!isSupported)
3574 {
3575 return false;
3576 }
3577
3578 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003579 layer->SetBackendId(setBackend);
3580 if (!layer)
3581 {
3582 return Fail("%s: Could not add the PadLayer", __func__);
3583 }
Mike Kelly3c673942019-07-25 09:26:06 +01003584 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003585
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003586 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003587}
3588
Mike Kelly0a879362019-07-29 16:56:31 +01003589template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003590 typename HalOperation = typename HalPolicy::Operation,
3591 typename HalModel = typename HalPolicy::Model>
3592bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003593{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003594 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003595
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003596 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3597 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3598 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003599
3600 if (inputOperand == nullptr
3601 || requestedShapeOperand == nullptr
3602 || outputOperand == nullptr)
3603 {
3604 return Fail("%s: Operation has invalid inputs", __func__);
3605 }
3606
3607 if (requestedShapeOperand->dimensions.size() != 1)
3608 {
3609 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3610 __func__, requestedShapeOperand->dimensions.size());
3611 }
3612
3613 std::vector<int32_t> targetDimensions;
3614 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3615 {
3616 return Fail("%s: Could not read values of input 1", __func__);
3617 }
3618
3619 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3620
3621 Shape requestedShape;
3622 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3623 // function that resolves these values into a fully specified tensor shape.
3624 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3625 {
3626 return Fail("%s: Failed to resolve the requested shape", __func__);
3627 }
3628
Mike Kelly46272802019-08-14 17:00:48 +01003629 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3630 if (!input.IsValid())
3631 {
3632 return Fail("%s: Could not read input 0", __func__);
3633 }
3634
3635 armnn::ReshapeDescriptor reshapeDescriptor;
3636 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3637 requestedShape.dimensions.data());
3638
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003639 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3640
Mike Kelly46272802019-08-14 17:00:48 +01003641 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003642 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003643 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3644 {
3645 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3646 IsReshapeSupported,
3647 data.m_Backends,
3648 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003649 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003650 input.GetTensorInfo(),
3651 outputInfo,
3652 reshapeDescriptor);
3653 };
3654
3655 if(!IsDynamicTensor(outputInfo))
3656 {
3657 validateFunc(outputInfo, isSupported);
3658 }
3659 else
3660 {
3661 isSupported = AreDynamicTensorsSupported();
3662 }
3663
Mike Kelly46272802019-08-14 17:00:48 +01003664 if (!isSupported)
3665 {
3666 return false;
3667 }
3668
3669 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003670 layer->SetBackendId(setBackend);
3671 if (!layer)
3672 {
3673 return Fail("%s: Could not add the ReshapeLayer", __func__);
3674 }
Mike Kelly46272802019-08-14 17:00:48 +01003675 input.Connect(layer->GetInputSlot(0));
3676
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003677 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003678}
3679
3680template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003681 typename HalOperation = typename HalPolicy::Operation,
3682 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003683bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003684{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003685 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003686
3687 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3688 if (!input.IsValid())
3689 {
3690 return Fail("%s: Operation has invalid inputs", __func__);
3691 }
3692
3693 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3694 unsigned int rank = inputInfo.GetNumDimensions();
3695 if (rank > 4)
3696 {
3697 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3698 }
3699
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003700 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003701 if (!output)
3702 {
3703 return Fail("%s: Could not read output 0", __func__);
3704 }
3705
Sadik Armagan346e8112020-09-02 09:55:14 +01003706 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003707 {
3708 return Fail("%s: Dynamic output tensors are not supported", __func__);
3709 }
3710
3711 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3712 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003713 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003714
Mike Kelly46272802019-08-14 17:00:48 +01003715 std::vector<int32_t> axis;
3716 if (!axisOperand)
3717 {
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003718 for (unsigned int i = 0; i < rank; ++i)
3719 {
3720 axis.push_back(static_cast<unsigned int>(i));
3721 }
Mike Kelly46272802019-08-14 17:00:48 +01003722 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003723 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003724 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003725 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003726 }
3727
3728 std::vector<uint32_t> outputDims;
3729 for (unsigned int i = 0; i < rank; i++)
3730 {
3731 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3732 auto currentDimension = inputInfo.GetShape()[i];
3733 if (skipSqueeze || currentDimension != 1)
3734 {
3735 outputDims.push_back(currentDimension);
3736 }
3737 }
3738
3739 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3740
3741 armnn::TensorInfo outputInfo = inputInfo;
3742 outputInfo.SetShape(outShape);
3743
3744 armnn::ReshapeDescriptor reshapeDesc;
3745 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3746
3747 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003748 armnn::BackendId setBackend;
Mike Kelly46272802019-08-14 17:00:48 +01003749 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3750 IsReshapeSupported,
3751 data.m_Backends,
3752 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003753 setBackend,
Mike Kelly46272802019-08-14 17:00:48 +01003754 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003755 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003756 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003757
Mike Kelly46272802019-08-14 17:00:48 +01003758 if (!isSupported)
3759 {
3760 return false;
3761 }
3762
3763 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003764 layer->SetBackendId(setBackend);
3765 if (!layer)
3766 {
3767 return Fail("%s: Could not add the ReshapeLayer", __func__);
3768 }
Mike Kelly46272802019-08-14 17:00:48 +01003769 input.Connect(layer->GetInputSlot(0));
3770
3771 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3772}
3773
3774template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003775 typename HalOperation = typename HalPolicy::Operation,
3776 typename HalModel = typename HalPolicy::Model>
3777bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003778{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003779 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003780
3781 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3782 if (!input.IsValid())
3783 {
3784 return Fail("%s: Operation has invalid inputs", __func__);
3785 }
3786
3787 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3788 unsigned int rank = inputInfo.GetNumDimensions();
3789 if (rank > 4)
3790 {
3791 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3792 }
3793
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003794 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003795 if (!output)
3796 {
3797 return Fail("%s: Could not read output 0", __func__);
3798 }
3799
3800 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003801
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003802 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3803 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3804 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003805
3806 std::vector<int32_t> beginValues;
3807 std::vector<int32_t> endValues;
3808 std::vector<int32_t> stridesValues;
3809
3810 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003811 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003812 {
3813 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3814 {
3815 return false;
3816 }
3817
3818 if (operandValues.size() != rank)
3819 {
3820 return false;
3821 }
3822
3823 return true;
3824 };
3825
3826 if (!ValidateInputOperands(*beginOperand, beginValues)
3827 || !ValidateInputOperands(*endOperand, endValues)
3828 || !ValidateInputOperands(*stridesOperand, stridesValues))
3829 {
3830 return Fail("%s: Operation has invalid input operand", __func__);
3831 }
3832
3833 // Stride cannot have value '0'
3834 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3835 {
3836 return Fail("%s: Stride must be non-zero value.", __func__);
3837 }
3838
3839 armnn::StridedSliceDescriptor descriptor;
3840 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3841 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3842 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3843 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3844
3845 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3846 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3847 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3848 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3849 {
3850 return Fail("%s: Operation has invalid inputs", __func__);
3851 }
3852
3853 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003854 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003855 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3856 {
3857 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3858 IsStridedSliceSupported,
3859 data.m_Backends,
3860 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003861 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003862 inputInfo,
3863 outputInfo,
3864 descriptor);
3865 };
3866
3867 if(IsDynamicTensor(outputInfo))
3868 {
3869 isSupported = AreDynamicTensorsSupported();
3870 }
3871 else
3872 {
3873 validateFunc(outputInfo, isSupported);
3874 }
3875
Mike Kelly46272802019-08-14 17:00:48 +01003876 if (!isSupported)
3877 {
3878 return false;
3879 }
3880
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003881 // Check if slice can fit in a inferred output
3882 armnn::TensorShape inputShape = inputInfo.GetShape();
3883 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3884 {
3885 int stride = descriptor.m_Stride[i];
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003886
3887 if (descriptor.m_ShrinkAxisMask & (1 << i))
3888 {
3889 // If the difference between the start point and the end point of the slice on an axis being shrunk
3890 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3891 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3892 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3893 {
3894 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3895 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003896
3897 if(stride < 0)
3898 {
3899 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3900 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003901 }
3902 }
3903
Mike Kelly46272802019-08-14 17:00:48 +01003904 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003905 layer->SetBackendId(setBackend);
3906 if (!layer)
3907 {
3908 return Fail("%s: Could not add the StridedSliceLayer", __func__);
3909 }
Mike Kelly46272802019-08-14 17:00:48 +01003910 input.Connect(layer->GetInputSlot(0));
3911
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003912 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003913}
3914
3915template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003916 typename HalOperation = typename HalPolicy::Operation,
3917 typename HalModel = typename HalPolicy::Model>
3918bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003919{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003920 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01003921 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003922
3923 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3924 if (!input.IsValid())
3925 {
3926 return Fail("%s: Operation has invalid inputs", __func__);
3927 }
3928
3929 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3930 unsigned int rank = inputInfo.GetNumDimensions();
3931 if (rank > 4)
3932 {
3933 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3934 }
3935
3936 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3937 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003938 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003939
3940 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01003941 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003942 {
Mike Kelly46272802019-08-14 17:00:48 +01003943 for (unsigned int i = rank; i > 0; i--)
3944 {
Matthew Sloyan9b088d92020-09-14 15:12:55 +01003945 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
Mike Kelly46272802019-08-14 17:00:48 +01003946 }
3947 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003948 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003949 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003950 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003951 }
3952
3953 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3954
Mike Kelly4a956582020-02-28 10:32:09 +00003955 armnn::TransposeDescriptor transposeDesc;
3956 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003957
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003958 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003959 if (!output)
3960 {
3961 return Fail("%s: Could not read output 0", __func__);
3962 }
3963
3964 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3965
3966 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003967 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003968 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3969 {
3970 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3971 IsTransposeSupported,
3972 data.m_Backends,
3973 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003974 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003975 inputInfo,
3976 outputInfo,
3977 transposeDesc);
3978 };
3979
3980 if(IsDynamicTensor(outputInfo))
3981 {
3982 isSupported = AreDynamicTensorsSupported();
3983 }
3984 else
3985 {
3986 validateFunc(outputInfo, isSupported);
3987 }
3988
Mike Kelly46272802019-08-14 17:00:48 +01003989 if (!isSupported)
3990 {
3991 return false;
3992 }
3993
Mike Kelly4a956582020-02-28 10:32:09 +00003994 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00003995 layer->SetBackendId(setBackend);
3996 if (!layer)
3997 {
3998 return Fail("%s: Could not add the TransposeLayer", __func__);
3999 }
Mike Kelly46272802019-08-14 17:00:48 +01004000 input.Connect(layer->GetInputSlot(0));
4001
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004002 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01004003}
4004
4005template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01004006 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01004007 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01004008 typename HalModel = typename HalPolicy::Model>
4009bool ConvertBatchToSpaceNd(const HalOperation& operation,
4010 const HalModel& model,
4011 ConversionData& data)
4012{
Finn Williams23b87b32019-07-30 11:44:05 +01004013
4014 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4015 if (!input.IsValid())
4016 {
4017 return Fail("%s: Operation has invalid inputs", __func__);
4018 }
4019
4020 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4021 if (!output)
4022 {
4023 return Fail("%s: Could not read output 0", __func__);
4024 }
4025
4026 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01004027
4028 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4029 if (!blockOperand)
4030 {
4031 return Fail("%s: Could not read input 1", __func__);
4032 }
4033
4034 // Convert the block operand to int32
4035 std::vector<int32_t> block;
4036 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4037 {
4038 return Fail("%s: Input 1 has invalid values", __func__);
4039 }
4040
4041 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4042
4043 unsigned int rank = inputInfo.GetNumDimensions();
4044 if (rank != 4)
4045 {
4046 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
4047 }
4048
4049 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4050 {
4051 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4052 " greater than or equal to 1", __func__);
4053 }
4054
4055 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4056 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4057 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4058
Kevin May42477c12020-03-26 13:34:14 +00004059 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004060 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004061 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004062 }
4063 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4064 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4065
4066 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004067 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004068 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4069 {
4070 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4071 IsBatchToSpaceNdSupported,
4072 data.m_Backends,
4073 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004074 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004075 inputInfo,
4076 outputInfo,
4077 batchToSpaceNdDesc);
4078 };
4079
4080 if(!IsDynamicTensor(outputInfo))
4081 {
4082 validateFunc(outputInfo, isSupported);
4083 }
4084 else
4085 {
4086 isSupported = AreDynamicTensorsSupported();
4087 }
4088
4089
Finn Williams23b87b32019-07-30 11:44:05 +01004090 if (!isSupported)
4091 {
4092 return false;
4093 }
4094
4095 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004096 layer->SetBackendId(setBackend);
4097 if (!layer)
4098 {
4099 return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
4100 }
Finn Williams23b87b32019-07-30 11:44:05 +01004101 input.Connect(layer->GetInputSlot(0));
4102
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004103 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004104}
Mike Kelly0a879362019-07-29 16:56:31 +01004105
Finn Williamsd74c5052019-07-30 17:06:00 +01004106template<typename HalPolicy,
4107 typename HalOperation = typename HalPolicy::Operation,
4108 typename HalOperand = typename HalPolicy::Operand,
4109 typename HalModel = typename HalPolicy::Model>
4110bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4111{
4112 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4113 if (!input.IsValid())
4114 {
4115 return Fail("%s: Operation has invalid inputs", __func__);
4116 }
4117
4118 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4119 unsigned int rank = inputInfo.GetNumDimensions();
4120 unsigned int spatialDim = rank - 2;
4121
4122 if (rank != 4)
4123 {
4124 Fail("%s: Only inputs with rank 4 are supported", __func__);
4125 }
4126
4127 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4128 if (!output)
4129 {
4130 return Fail("%s: Could not read output 0", __func__);
4131 }
4132
4133 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004134
4135 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4136 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4137
4138 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4139 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4140 {
4141 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4142 }
4143
4144 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004145 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4146 {
4147 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4148 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004149 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4150 {
4151 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4152 }
4153
4154 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4155 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4156 {
4157 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4158 }
4159
4160 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4161 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004162 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4163 {
4164 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4165 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004166 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4167 {
4168 int paddingBeforeInput = paddings[i];
4169 int paddingAfterInput = paddings[i + 1];
4170 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4171 {
4172 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4173 }
4174
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004175 paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
4176 static_cast<unsigned int>(paddingAfterInput));
Finn Williamsd74c5052019-07-30 17:06:00 +01004177 }
4178
4179 armnn::SpaceToBatchNdDescriptor descriptor;
4180 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4181 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4182 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4183
Kevin May42477c12020-03-26 13:34:14 +00004184 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004185 {
4186 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4187 }
4188
4189 bool isSupported = false;
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004190 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004191 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4192 {
4193 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4194 IsSpaceToBatchNdSupported,
4195 data.m_Backends,
4196 isSupported,
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004197 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004198 inputInfo,
4199 outputInfo,
4200 descriptor);
4201 };
4202
4203 if(IsDynamicTensor(outputInfo))
4204 {
4205 isSupported = AreDynamicTensorsSupported();
4206 }
4207 else
4208 {
4209 validateFunc(outputInfo, isSupported);
4210 }
4211
Finn Williamsd74c5052019-07-30 17:06:00 +01004212 if (!isSupported)
4213 {
4214 return false;
4215 }
4216
4217 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
Renato Grottesi77a0fb02023-05-08 12:55:03 +00004218 layer->SetBackendId(setBackend);
4219 if (!layer)
4220 {
4221 return Fail("%s: Could not add the BatchToSpaceLayer", __func__);
4222 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004223 input.Connect(layer->GetInputSlot(0));
4224
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004225 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004226}
4227
saoste01b8471482018-10-10 09:44:51 +01004228} // namespace armnn_driver
Kevin DuBoisa2cb5482020-08-26 13:41:12 -07004229#ifdef __clang__
4230#pragma clang diagnostic pop
4231#endif