Skip to content

Instantly share code, notes, and snippets.

@hewumars
Last active July 21, 2020 02:33
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save hewumars/86fc8e39aa64562bbaf77030b9601726 to your computer and use it in GitHub Desktop.
Save hewumars/86fc8e39aa64562bbaf77030b9601726 to your computer and use it in GitHub Desktop.

Using the FCPlugin from TensorRT's samplePlugin as the example: inherit from IPluginExt to implement a custom plugin. IPluginExt is close to deprecation; the IPluginV2 family of plugin interfaces is recommended instead.

// Fully-connected (InnerProduct) plugin from TensorRT's samplePlugin, built on
// the (near-deprecated) IPluginExt interface.
// Two construction paths:
//  - FCPlugin(weights, ...): engine-build time; deep-copies the weights into
//    host memory, which initialize() converts/uploads to the device.
//  - FCPlugin(data, length): deserialization; restores scalar state and copies
//    the already-converted weights straight to the device.
// NOTE(review): the class owns raw malloc'd host buffers and raw device
// pointers but does not disable copy construction/assignment -- copying an
// instance would double-free. Confirm instances are never copied.
class FCPlugin : public nvinfer1::IPluginExt
{
public:
    FCPlugin(const nvinfer1::Weights* weights, int nbWeights, int nbOutputChannels)
        : mNbOutputChannels(nbOutputChannels)
    {
        assert(nbWeights == 2); // weights[0] = kernel, weights[1] = bias

        mKernelWeights = weights[0];
        assert(mKernelWeights.type == nvinfer1::DataType::kFLOAT || mKernelWeights.type == nvinfer1::DataType::kHALF);

        mBiasWeights = weights[1];
        // Bias is optional; when present it holds one value per output channel.
        assert(mBiasWeights.count == 0 || mBiasWeights.count == nbOutputChannels);
        assert(mBiasWeights.type == nvinfer1::DataType::kFLOAT || mBiasWeights.type == nvinfer1::DataType::kHALF);

        // Replace the borrowed pointers with owned host copies so the plugin
        // does not depend on the caller keeping its buffers alive.
        mKernelWeights.values = malloc(mKernelWeights.count * type2size(mKernelWeights.type));
        std::memcpy(const_cast<void*>(mKernelWeights.values), weights[0].values,
            mKernelWeights.count * type2size(mKernelWeights.type));
        mBiasWeights.values = malloc(mBiasWeights.count * type2size(mBiasWeights.type));
        std::memcpy(const_cast<void*>(mBiasWeights.values), weights[1].values,
            mBiasWeights.count * type2size(mBiasWeights.type));

        // The kernel holds nbInputChannels * nbOutputChannels values.
        mNbInputChannels = int(weights[0].count / nbOutputChannels);
    }

    // Create the plugin at runtime from a byte stream; the layout must match
    // exactly what serialize() writes.
    FCPlugin(const void* data, size_t length)
    {
        const char *d = static_cast<const char*>(data), *a = d;
        read(d, mNbInputChannels);
        read(d, mNbOutputChannels);

        mKernelWeights.count = mNbInputChannels * mNbOutputChannels;
        // No host copies on this path; the weights live only on the device.
        mKernelWeights.values = nullptr;

        read(d, mBiasWeights.count);
        mBiasWeights.values = nullptr;

        read(d, mDataType);

        // Serialized weights were already converted to mDataType, so they can
        // be copied to the device verbatim.
        deserializeToDevice(d, mDeviceKernel, mKernelWeights.count * type2size(mDataType));
        deserializeToDevice(d, mDeviceBias, mBiasWeights.count * type2size(mDataType));
        assert(d == a + length); // the whole buffer must be consumed
    }

    // Frees the host-side weight copies (build path only); device buffers are
    // released in terminate(), not here.
    ~FCPlugin()
    {
        if (mKernelWeights.values)
        {
            free(const_cast<void*>(mKernelWeights.values));
            mKernelWeights.values = nullptr;
        }
        if (mBiasWeights.values)
        {
            free(const_cast<void*>(mBiasWeights.values));
            mBiasWeights.values = nullptr;
        }
    }

    // The layer produces a single output tensor.
    int getNbOutputs() const override
    {
        return 1;
    }

    // Output is CHW = (mNbOutputChannels, 1, 1); the single CHW input must
    // flatten to exactly mNbInputChannels values.
    nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) override
    {
        assert(index == 0 && nbInputDims == 1 && inputs[0].nbDims == 3);
        assert(mNbInputChannels == inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]);
        return nvinfer1::Dims3(mNbOutputChannels, 1, 1);
    }

    // Accept FP32 always, FP16 only on SM 6.0+; NCHW layout only.
    bool supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const override
    {
        int device;
        CHECK(cudaGetDevice(&device));
        cudaDeviceProp props{};
        cudaGetDeviceProperties(&props, device);
        // Pack (major, minor) into one comparable value: SM 6.0 -> 0x600.
        int smVersion = props.major << 8 | props.minor;
        // Half precision is supported after SM60
        return (type == nvinfer1::DataType::kFLOAT || (type == nvinfer1::DataType::kHALF && smVersion >= 0x600))
            && format == nvinfer1::PluginFormat::kNCHW;
    }

    // Record the data type TensorRT selected; initialize() and enqueue() use it.
    void configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs, const nvinfer1::Dims* outputDims,
        int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override
    {
        assert((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
            && format == nvinfer1::PluginFormat::kNCHW);
        mDataType = type;
    }

    // Create cuDNN/cuBLAS resources and, on the build path (host weight copies
    // present), convert the weights to mDataType and upload them.
    int initialize() override
    {
        CHECK(cudnnCreate(&mCudnn)); // initialize cudnn and cublas
        CHECK(cublasCreate(&mCublas));
        CHECK(
            cudnnCreateTensorDescriptor(&mSrcDescriptor)); // create cudnn tensor descriptors we need for bias addition
        CHECK(cudnnCreateTensorDescriptor(&mDstDescriptor));
        if (mKernelWeights.values)
        {
            convertAndCopyToDevice(mDeviceKernel, mKernelWeights);
        }
        if (mBiasWeights.values)
        {
            convertAndCopyToDevice(mDeviceBias, mBiasWeights);
        }

        return 0;
    }

    // Mirror of initialize(): destroy library handles and free device weights.
    virtual void terminate() override
    {
        CHECK(cudnnDestroyTensorDescriptor(mSrcDescriptor));
        CHECK(cudnnDestroyTensorDescriptor(mDstDescriptor));
        CHECK(cublasDestroy(mCublas));
        CHECK(cudnnDestroy(mCudnn));
        if (mDeviceKernel)
        {
            cudaFree(mDeviceKernel);
            mDeviceKernel = nullptr;
        }
        if (mDeviceBias)
        {
            cudaFree(mDeviceBias);
            mDeviceBias = nullptr;
        }
    }

    // No scratch space is needed beyond the in/out tensors.
    virtual size_t getWorkspaceSize(int maxBatchSize) const override
    {
        return 0;
    }

    // output = kernel^T * input via cuBLAS GEMM (cuBLAS is column-major, hence
    // CUBLAS_OP_T on the kernel), then bias addition via cuDNN when present.
    virtual int enqueue(
        int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) override
    {
        float onef{1.0f}, zerof{0.0f};
        __half oneh = fp16::__float2half(1.0f), zeroh = fp16::__float2half(0.0f);

        // Run both libraries on TensorRT's stream for this invocation.
        cublasSetStream(mCublas, stream);
        cudnnSetStream(mCudnn, stream);

        if (mDataType == nvinfer1::DataType::kFLOAT)
        {
            CHECK(cublasSgemm(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mNbOutputChannels, batchSize, mNbInputChannels, &onef,
                reinterpret_cast<const float*>(mDeviceKernel), mNbInputChannels,
                reinterpret_cast<const float*>(inputs[0]), mNbInputChannels, &zerof,
                reinterpret_cast<float*>(outputs[0]), mNbOutputChannels));
        }
        else
        {
            CHECK(cublasHgemm(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mNbOutputChannels, batchSize, mNbInputChannels, &oneh,
                reinterpret_cast<const __half*>(mDeviceKernel), mNbInputChannels,
                reinterpret_cast<const __half*>(inputs[0]), mNbInputChannels, &zeroh,
                reinterpret_cast<__half*>(outputs[0]), mNbOutputChannels));
        }
        if (mBiasWeights.count)
        {
            cudnnDataType_t cudnnDT = mDataType == nvinfer1::DataType::kFLOAT ? CUDNN_DATA_FLOAT : CUDNN_DATA_HALF;
            // Bias (1 x C x 1 x 1) is broadcast-added into the (N x C x 1 x 1) output.
            CHECK(cudnnSetTensor4dDescriptor(mSrcDescriptor, CUDNN_TENSOR_NCHW, cudnnDT, 1, mNbOutputChannels, 1, 1));
            CHECK(cudnnSetTensor4dDescriptor(
                mDstDescriptor, CUDNN_TENSOR_NCHW, cudnnDT, batchSize, mNbOutputChannels, 1, 1));
            // NOTE(review): float alpha/beta are passed even for HALF tensors;
            // this looks intentional (cuDNN scaling params) -- verify against
            // the cuDNN cudnnAddTensor documentation for the version in use.
            CHECK(cudnnAddTensor(mCudnn, &onef, mSrcDescriptor, mDeviceBias, &onef, mDstDescriptor, outputs[0]));
        }

        return 0;
    }

    // Scalars + both weight arrays, with weights counted at mDataType width
    // (serialize() converts them on the way out).
    virtual size_t getSerializationSize() override
    {
        return sizeof(mNbInputChannels) + sizeof(mNbOutputChannels) + sizeof(mBiasWeights.count) + sizeof(mDataType)
            + (mKernelWeights.count + mBiasWeights.count) * type2size(mDataType);
    }

    // Field order must match the deserializing constructor above.
    virtual void serialize(void* buffer) override
    {
        char *d = static_cast<char*>(buffer), *a = d;

        write(d, mNbInputChannels);
        write(d, mNbOutputChannels);
        write(d, mBiasWeights.count);
        write(d, mDataType);
        convertAndCopyToBuffer(d, mKernelWeights);
        convertAndCopyToBuffer(d, mBiasWeights);
        assert(d == a + getSerializationSize());
    }

private:
    // Element size in bytes for the two supported data types.
    size_t type2size(nvinfer1::DataType type)
    {
        return type == nvinfer1::DataType::kFLOAT ? sizeof(float) : sizeof(__half);
    }

    // Append a trivially-copyable value to the buffer and advance the cursor.
    template <typename T>
    void write(char*& buffer, const T& val)
    {
        *reinterpret_cast<T*>(buffer) = val;
        buffer += sizeof(T);
    }

    // Extract a trivially-copyable value from the buffer and advance the cursor.
    template <typename T>
    void read(const char*& buffer, T& val)
    {
        val = *reinterpret_cast<const T*>(buffer);
        buffer += sizeof(T);
    }

    // Allocate `count` bytes on the device and copy `data` into them.
    void* copyToDevice(const void* data, size_t count)
    {
        void* deviceData;
        CHECK(cudaMalloc(&deviceData, count));
        CHECK(cudaMemcpy(deviceData, data, count, cudaMemcpyHostToDevice));
        return deviceData;
    }

    // Upload weights to the device, converting FLOAT<->HALF on the host first
    // when the stored type differs from mDataType.
    void convertAndCopyToDevice(void*& deviceWeights, const nvinfer1::Weights& weights)
    {
        if (weights.type != mDataType) // Weights are converted in host memory first, if the type does not match
        {
            size_t size = weights.count * (mDataType == nvinfer1::DataType::kFLOAT ? sizeof(float) : sizeof(__half));
            void* buffer = malloc(size);
            for (int64_t v = 0; v < weights.count; ++v)
            {
                if (mDataType == nvinfer1::DataType::kFLOAT)
                {
                    static_cast<float*>(buffer)[v] = fp16::__half2float(static_cast<const __half*>(weights.values)[v]);
                }
                else
                {
                    static_cast<__half*>(buffer)[v] = fp16::__float2half(static_cast<const float*>(weights.values)[v]);
                }
            }
            deviceWeights = copyToDevice(buffer, size);
            free(buffer);
        }
        else
        {
            deviceWeights = copyToDevice(weights.values, weights.count * type2size(mDataType));
        }
    }

    // Serialize weights into `buffer` at mDataType precision, converting
    // element-by-element when the stored type differs; advances the cursor.
    void convertAndCopyToBuffer(char*& buffer, const nvinfer1::Weights& weights)
    {
        if (weights.type != mDataType)
        {
            for (int64_t v = 0; v < weights.count; ++v)
            {
                if (mDataType == nvinfer1::DataType::kFLOAT)
                {
                    reinterpret_cast<float*>(buffer)[v]
                        = fp16::__half2float(static_cast<const __half*>(weights.values)[v]);
                }
                else
                {
                    reinterpret_cast<__half*>(buffer)[v]
                        = fp16::__float2half(static_cast<const float*>(weights.values)[v]);
                }
            }
        }
        else
        {
            std::memcpy(buffer, weights.values, weights.count * type2size(mDataType));
        }
        buffer += weights.count * type2size(mDataType);
    }

    // Copy `size` bytes from the host serialization buffer to a fresh device
    // allocation; advances the host cursor.
    void deserializeToDevice(const char*& hostBuffer, void*& deviceWeights, size_t size)
    {
        deviceWeights = copyToDevice(hostBuffer, size);
        hostBuffer += size;
    }

    int mNbOutputChannels, mNbInputChannels;
    // Host-side deep copies (build path only; values are null after deserialization).
    nvinfer1::Weights mKernelWeights, mBiasWeights;

    // Precision chosen by TensorRT in configureWithFormat().
    nvinfer1::DataType mDataType{nvinfer1::DataType::kFLOAT};
    // Device-resident weights, owned between initialize()/deserialization and terminate().
    void* mDeviceKernel{nullptr};
    void* mDeviceBias{nullptr};

    cudnnHandle_t mCudnn;
    cublasHandle_t mCublas;
    cudnnTensorDescriptor_t mSrcDescriptor, mDstDescriptor;
};

The plugin factory provides two createPlugin overloads: one used while building the engine from the parsed model, the other used when deserializing a saved engine.

// Factory that creates the FCPlugin both for the caffe parser (build path) and
// for engine deserialization. It owns the single plugin instance it hands out;
// the application calls destroyPlugin() once the engine no longer needs it.
class PluginFactory : public nvinfer1::IPluginFactory, public nvcaffeparser1::IPluginFactoryExt
{
public:
    // caffe parser plugin implementation
    bool isPlugin(const char* name) override
    {
        return isPluginExt(name);
    }

    // Only the layer named "ip2" is handled by this factory.
    bool isPluginExt(const char* name) override
    {
        return strcmp(name, "ip2") == 0;
    }

    // Build-time creation from parsed weights.
    virtual IPlugin* createPlugin(const char* layerName, const nvinfer1::Weights* weights, int nbWeights) override
    {
        try
        {
            // there's no way to pass parameters through from the model definition, so we have to define it here
            // explicitly
            static const int NB_OUTPUT_CHANNELS = 10;
            assert(isPlugin(layerName) && nbWeights == 2);
            assert(mPlugin.get() == nullptr);
            mPlugin.reset(new FCPlugin(weights, nbWeights, NB_OUTPUT_CHANNELS));
            return mPlugin.get();
        }
        catch (const std::exception& e)
        {
            sample::gLogError << e.what() << std::endl;
            return nullptr;
        }
    }

    // deserialization plugin implementation
    nvinfer1::IPlugin* createPlugin(const char* layerName, const void* serialData, size_t serialLength) override
    {
        try
        {
            assert(isPlugin(layerName));
            // IPlugin resource will not be released when engine destroy.
            // Use this unique ptr in factory to release the data.
            mPlugin.reset(new FCPlugin(serialData, serialLength));
            return mPlugin.get();
        }
        catch (const std::exception& e)
        {
            sample::gLogError << e.what() << std::endl;
            return nullptr;
        }
    }

    // User application destroys plugin when it is safe to do so.
    // Should be done after consumers of plugin (like ICudaEngine) are destroyed.
    void destroyPlugin()
    {
        mPlugin.reset();
    }

    std::unique_ptr<FCPlugin> mPlugin{nullptr};
};

Using objectDetect_Yolo/nvdsinfer_custom_impl_Yolo/yoloPlugin.cpp/.h from https://github.com/Kwull/deepstream-4.0.1.git as the example: a plugin implemented by inheriting from the IPluginV2 base class.

//yoloPlugin.h
namespace
{
// Version/name pair under which the plugin registers with TensorRT; the same
// strings are reported by both the plugin and its creator so lookup matches.
const char* YOLOV3LAYER_PLUGIN_VERSION {"1"};
const char* YOLOV3LAYER_PLUGIN_NAME {"YoloLayerV3_TRT"};
} // namespace
// YOLOv3 detection-output layer as a TensorRT IPluginV2.
// State is four scalars (boxes per cell, class count, grid size, derived
// output element count) plus the plugin namespace; see yoloPlugin.cpp for
// the method definitions.
class YoloLayerV3 : public nvinfer1::IPluginV2
{
public:
    // Deserializing constructor; reads state written by serialize().
    YoloLayerV3 (const void* data, size_t length);
    // Build-time constructor; computes m_OutputSize from the parameters.
    YoloLayerV3 (const uint& numBoxes, const uint& numClasses, const uint& gridSize);
    const char* getPluginType () const override { return YOLOV3LAYER_PLUGIN_NAME; }
    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }
    int getNbOutputs () const override { return 1; }

    nvinfer1::Dims getOutputDimensions (
        int index, const nvinfer1::Dims* inputs,
        int nbInputDims) override;

    bool supportsFormat (
        nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;

    void configureWithFormat (
        const nvinfer1::Dims* inputDims, int nbInputs,
        const nvinfer1::Dims* outputDims, int nbOutputs,
        nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override;

    // No resources to acquire or release; the CUDA kernel is stateless.
    int initialize () override { return 0; }
    void terminate () override {}
    size_t getWorkspaceSize (int maxBatchSize) const override { return 0; }
    int enqueue (
        int batchSize, const void* const* inputs, void** outputs,
        void* workspace, cudaStream_t stream) override;
    size_t getSerializationSize() const override;
    void serialize (void* buffer) const override;
    void destroy () override { delete this; }
    nvinfer1::IPluginV2* clone() const override;

    void setPluginNamespace (const char* pluginNamespace)override {
        m_Namespace = pluginNamespace;
    }
    virtual const char* getPluginNamespace () const override {
        return m_Namespace.c_str();
    }

private:
    uint m_NumBoxes {0};    // anchor boxes predicted per grid cell
    uint m_NumClasses {0};  // object classes per box
    uint m_GridSize {0};    // feature-map side length (grid is square)
    // grid^2 * numBoxes * (4 bbox + 1 objectness + numClasses) elements
    uint64_t m_OutputSize {0};
    std::string m_Namespace {""};
};

// Creator registered with TensorRT (see REGISTER_TENSORRT_PLUGIN at the bottom
// of yoloPlugin.cpp). Only the deserialization path is supported; field-based
// creation is not implemented because the plugin is constructed directly by
// the network-building code.
class YoloLayerV3PluginCreator : public nvinfer1::IPluginCreator
{
public:
    YoloLayerV3PluginCreator () {}
    ~YoloLayerV3PluginCreator () {}

    const char* getPluginName () const override { return YOLOV3LAYER_PLUGIN_NAME; }
    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }

    // No PluginField metadata is exposed.
    const nvinfer1::PluginFieldCollection* getFieldNames() override {
        std::cerr<< "YoloLayerV3PluginCreator::getFieldNames is not implemented" << std::endl;
        return nullptr;
    }
    // Used when building/serializing the plugin from field parameters.
    nvinfer1::IPluginV2* createPlugin (
        const char* name, const nvinfer1::PluginFieldCollection* fc) override
    {
        // Fixed: the message previously named getFieldNames instead of createPlugin.
        std::cerr<< "YoloLayerV3PluginCreator::createPlugin is not implemented.\n";
        return nullptr;
    }
    // Used when deserializing an engine that contains this plugin.
    nvinfer1::IPluginV2* deserializePlugin (
        const char* name, const void* serialData, size_t serialLength) override
    {
        std::cout << "Deserialize yoloLayerV3 plugin: " << name << std::endl;
        return new YoloLayerV3(serialData, serialLength);
    }

    void setPluginNamespace(const char* libNamespace) override {
        m_Namespace = libNamespace;
    }
    const char* getPluginNamespace() const override {
        return m_Namespace.c_str();
    }

private:
    std::string m_Namespace {""};
};
//yoloPlugin.cpp
namespace {
// Append a trivially-copyable value to a serialization buffer, advancing the
// cursor past it.
template <typename T>
void write(char*& buffer, const T& val)
{
    T* slot = reinterpret_cast<T*>(buffer);
    *slot = val;
    buffer += sizeof(T);
}

// Extract a trivially-copyable value from a serialization buffer, advancing
// the cursor past it.
template <typename T>
void read(const char*& buffer, T& val)
{
    const T* slot = reinterpret_cast<const T*>(buffer);
    val = *slot;
    buffer += sizeof(T);
}
} //namespace

// Forward declaration of the CUDA kernel launcher (defined in the .cu file);
// transforms the raw network output for one batch on the given stream and
// returns the launch status.
cudaError_t cudaYoloLayerV3 (
    const void* input, void* output, const uint& batchSize,
    const uint& gridSize, const uint& numOutputClasses,
    const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream);

// Rebuild the plugin from the byte stream produced by serialize().
// Field order must match serialize() exactly.
YoloLayerV3::YoloLayerV3 (const void* data, size_t length)
{
    const char *d = static_cast<const char*>(data), *a = d;
    read(d, m_NumBoxes);
    read(d, m_NumClasses);
    read(d, m_GridSize);
    read(d, m_OutputSize);
    // Added: verify the whole buffer was consumed, catching layout/version
    // mismatches early (FCPlugin's deserializing constructor does the same).
    assert(d == a + length);
    (void) a; // silence unused warning in NDEBUG builds
}

// Build-time constructor: records the layer geometry and derives the number
// of output elements per batch item.
YoloLayerV3::YoloLayerV3 (
    const uint& numBoxes, const uint& numClasses, const uint& gridSize) :
    m_NumBoxes(numBoxes),
    m_NumClasses(numClasses),
    m_GridSize(gridSize)
{
    assert(m_NumBoxes > 0);
    assert(m_NumClasses > 0);
    assert(m_GridSize > 0);
    // Per grid cell: m_NumBoxes predictions of (x, y, w, h, objectness, classes).
    const uint cellCount = m_GridSize * m_GridSize;
    m_OutputSize = cellCount * (m_NumBoxes * (4 + 1 + m_NumClasses));
}

// The single output tensor has exactly the same dimensions as the single input.
nvinfer1::Dims YoloLayerV3::getOutputDimensions(
    int index, const nvinfer1::Dims* inputs, int nbInputDims)
{
    assert(index == 0);
    assert(nbInputDims == 1);
    nvinfer1::Dims outDims = inputs[0];
    return outDims;
}

// Only FP32 in linear NCHW layout is accepted.
bool YoloLayerV3::supportsFormat (
    nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
    if (type != nvinfer1::DataType::kFLOAT)
        return false;
    return format == nvinfer1::PluginFormat::kNCHW;
}

// Nothing is cached from the configuration; this only sanity-checks the
// input count, format, and dimension pointer TensorRT selected. The remaining
// parameters are intentionally unused.
void
YoloLayerV3::configureWithFormat (
    const nvinfer1::Dims* inputDims, int nbInputs,
    const nvinfer1::Dims* outputDims, int nbOutputs,
    nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize)
{
    assert(nbInputs == 1);
    assert (format == nvinfer1::PluginFormat::kNCHW);
    assert(inputDims != nullptr);
}

// Launch the YOLO transform kernel on TensorRT's stream for this batch.
// inputs[0]/outputs[0] are device pointers; the kernel's launch status is
// checked via CHECK. Returns 0 on success per the IPluginV2 contract.
int YoloLayerV3::enqueue(
    int batchSize, const void* const* inputs, void** outputs, void* workspace,
    cudaStream_t stream)
{
    CHECK(cudaYoloLayerV3(
              inputs[0], outputs[0], batchSize, m_GridSize, m_NumClasses, m_NumBoxes,
              m_OutputSize, stream));
    return 0;
}

// Serialized size: the four scalar members written by serialize(), in order.
size_t YoloLayerV3::getSerializationSize() const
{
    return sizeof(m_NumBoxes) + sizeof(m_NumClasses) + sizeof(m_GridSize) + sizeof(m_OutputSize);
}

// Write the plugin state into `buffer`. The field order must match the
// deserializing constructor and the size reported by getSerializationSize().
void YoloLayerV3::serialize(void* buffer) const
{
    char* cursor = static_cast<char*>(buffer);
    write(cursor, m_NumBoxes);
    write(cursor, m_NumClasses);
    write(cursor, m_GridSize);
    write(cursor, m_OutputSize);
}

// Produce an independent copy of this plugin (m_OutputSize is recomputed by
// the delegated constructor).
nvinfer1::IPluginV2* YoloLayerV3::clone() const
{
    YoloLayerV3* cloned = new YoloLayerV3 (m_NumBoxes, m_NumClasses, m_GridSize);
    // Fixed: the parameter constructor does not carry the namespace over, so a
    // clone of a namespaced plugin silently lost it; propagate it explicitly.
    cloned->setPluginNamespace(m_Namespace.c_str());
    return cloned;
}

// Register the creator with TensorRT's global plugin registry at load time.
REGISTER_TENSORRT_PLUGIN(YoloLayerV3PluginCreator);
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment