The following is Calibration::_updateScale from MNN's quantization tool: it walks every op in the model and rewrites the quantizable ones into their Int8 counterparts using the calibrated scales.

void Calibration::_updateScale() {
    for (const auto& op : _originaleModel->oplists) {
        // Skip ops the user explicitly excluded from quantization
        std::vector<std::string>::iterator iter = std::find(_skip_quant_ops.begin(), _skip_quant_ops.end(), op->name);
        if (iter != _skip_quant_ops.end()) {
            continue;
        }
        const auto opType = op->type;
        // Only Convolution, ConvolutionDepthwise and Eltwise are handled here
        if (opType != MNN::OpType_Convolution && opType != MNN::OpType_ConvolutionDepthwise &&
            opType != MNN::OpType_Eltwise) {
            continue;
        }
        auto tensorsPair = _opInfo.find(op->name);
        if (tensorsPair == _opInfo.end()) {
            MNN_ERROR("Can't find tensors for %s\n", op->name.c_str());
        }
        if (opType == MNN::OpType_Eltwise) {
            auto param = op->main.AsEltwise();
            // Now only support AddInt8
            if (param->type != MNN::EltwiseType_SUM) {
                continue;
            }
            const auto& inputScale0 = _scales[tensorsPair->second.first[0]];
            const auto& inputScale1 = _scales[tensorsPair->second.first[1]];
            const auto& outputScale = _scales[tensorsPair->second.second[0]];
            const int outputScaleSize = outputScale.size();
            // Precompute the reciprocal of the output scale
            std::vector<float> outputInvertScale(outputScaleSize);
            Helper::invertData(outputInvertScale.data(), outputScale.data(), outputScaleSize);
            // Rewrite the op in place as EltwiseInt8 and attach the scales
            op->type = MNN::OpType_EltwiseInt8;
            op->main.Reset();
            op->main.type = MNN::OpParameter_EltwiseInt8;
            auto eltwiseInt8Param = new MNN::EltwiseInt8T;
            auto input0ScaleParam = new MNN::QuantizedFloatParamT;
            auto input1ScaleParam = new MNN::QuantizedFloatParamT;
            auto outputScaleParam = new MNN::QuantizedFloatParamT;
            input0ScaleParam->tensorScale = inputScale0;
            input1ScaleParam->tensorScale = inputScale1;
            outputScaleParam->tensorScale = outputInvertScale;
            // The unique_ptrs take ownership of the raw parameter objects
            eltwiseInt8Param->inputQuan0 = std::unique_ptr<MNN::QuantizedFloatParamT>(input0ScaleParam);
            eltwiseInt8Param->inputQuan1 = std::unique_ptr<MNN::QuantizedFloatParamT>(input1ScaleParam);
            eltwiseInt8Param->outputQuan = std::unique_ptr<MNN::QuantizedFloatParamT>(outputScaleParam);
            op->main.value = eltwiseInt8Param;
            continue;
        }
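        // Why invert the output scale? An int8 add conceptually dequantizes,
        // adds, then requantizes:
        //     y_q = round((x0_q * s0 + x1_q * s1) / s_out)
        // Storing invS_out = 1 / s_out lets the kernel multiply instead of
        // divide (inferred from the inversion above, not from kernel source):
        //     y_q = round((x0_q * s0 + x1_q * s1) * invS_out)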
        // below is Conv/DepthwiseConv
        const auto& inputScale = _scales[tensorsPair->second.first[0]];
        const auto& outputScale = _scales[tensorsPair->second.second[0]];
        auto param = op->main.AsConvolution2D();
        const int channles = param->common->outputCount;
        const int weightSize = param->weight.size();
        param->symmetricQuan.reset(new MNN::QuantizedFloatParamT);
        // quantizedParam is a reference to param->symmetricQuan
        auto& quantizedParam = param->symmetricQuan;
        // One scale and one bias per output channel, one slot per weight
        quantizedParam->scale.resize(channles);
        quantizedParam->weight.resize(weightSize);
        quantizedParam->bias.resize(channles);
        if (opType == MNN::OpType_Convolution) {
            QuantizeConvPerChannel(param->weight.data(), param->weight.size(), param->bias.data(),
                                   quantizedParam->weight.data(), quantizedParam->bias.data(),
                                   quantizedParam->scale.data(), inputScale, outputScale,
                                   _weightQuantizeMethod, _weightClampValue);
            op->type = MNN::OpType_ConvInt8;
        } else if (opType == MNN::OpType_ConvolutionDepthwise) {
            QuantizeDepthwiseConv(param->weight.data(), param->weight.size(), param->bias.data(),
                                  quantizedParam->weight.data(), quantizedParam->bias.data(),
                                  quantizedParam->scale.data(), inputScale, outputScale,
                                  _weightQuantizeMethod, _weightClampValue);
            op->type = MNN::OpType_DepthwiseConvInt8;
        }
        // relu6 is downgraded to relu, presumably because the int8 output
        // range already saturates at the calibrated maximum
        if (param->common->relu6) {
            param->common->relu = true;
            param->common->relu6 = false;
        }
        // Clear the original float weights and bias; the int8 versions
        // now live in param->symmetricQuan
        param->weight.clear();
        param->bias.clear();
    }
}
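To make the scale handling concrete, here is a minimal, self-contained sketch of the eltwise-add requantization idea. This is not MNN code; addInt8 and every constant in it are invented for illustration, under the assumption that Helper::invertData computes elementwise reciprocals of the scales.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical illustration of the EltwiseInt8 math: dequantize two int8
// inputs, add them, and requantize by multiplying with a precomputed
// 1/outputScale (the quantity stored in outputQuan->tensorScale above).
int8_t addInt8(int8_t x0, float scale0, int8_t x1, float scale1, float invOutputScale) {
    const float sum     = x0 * scale0 + x1 * scale1;          // dequantize + add
    const float requant = std::round(sum * invOutputScale);   // multiply, no divide
    return static_cast<int8_t>(std::min(127.0f, std::max(-127.0f, requant)));
}

int main() {
    const float s0 = 0.05f, s1 = 0.04f, sOut = 0.1f;
    const float invOut = 1.0f / sOut; // what invertData would precompute
    // 40 * 0.05 + 50 * 0.04 = 4.0; 4.0 * (1 / 0.1) = 40
    std::printf("%d\n", addInt8(40, s0, 50, s1, invOut)); // prints 40
    return 0;
}

Precomputing the reciprocal once at model-conversion time trades a per-element division at inference for a multiplication, which appears to be the point of storing outputInvertScale rather than outputScale.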