// 什么是DAPP?DAPP是Decentralized Application的缩写,中文叫分布式应用/去中心化应用。通常来说,不同的DAPP会采用不同的底层技术开发平台和共识机制,或者自行发布代币。
// 以太坊中一般会认为智能合约就是DAPP,当然更准确的可以认为智能合约相当于服务器后台,另外要实现用户体验,还需要UI交互界面,通过RPC与后台对接,那么DAPP系统开发就是开发包含完整的智能合约和UI交互界面的DAPP。
int QuantizeConvPerChannel(const floatweight,const int size,const floatbias,int8_t*quantizedWeight,
int32_tquantizedBias,floatscale,const std::vector<float>&inputScale,
const std::vector<float>&outputScale,std::string method,float weightClampValue,bool mergeChannel){
const int inputChannels=inputScale.size();
const int outputChannels=outputScale.size();
const int icXoc=inputChannels*outputChannels;
DCHECK(size%icXoc==0)<<"Input Data Size Error!";
std::vector<float>quantizedWeightScale(outputChannels);
float inputScalexWeight=1.0f;
if(mergeChannel){
if(method=="MAX_ABS"){
SymmetricQuantizeWeight(weight,size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
else if(method=="ADMM"){
QuantizeWeightADMM(weight,size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
inputScalexWeight=inputScale[0];
}else{
const int kernelSize=size/icXoc;
const int ocStride=size/outputChannels;
//每个权重都乘上对应scale
std::vector<float>weightMultiByInputScale(size);
for(int oc=0;oc<outputChannels;++oc){
for(int ic=0;ic<inputChannels;++ic){
for(int i=0;i<kernelSize;++i){
const int index=ococStride+ickernelSize+i;
weightMultiByInputScale[index]=inputScale[ic]*weight[index];
}
}
}开发需求及分析:yy625019
if(method=="MAX_ABS"){
SymmetricQuantizeWeight(weightMultiByInputScale.data(),size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
else if(method=="ADMM"){
QuantizeWeightADMM(weightMultiByInputScale.data(),size,quantizedWeight,quantizedWeightScale.data(),outputChannels,weightClampValue);
}
}
for(int i=0;i<outputChannels;++i){
if(fabs(outputScale<i>)<=1e-6){
scale<i>=0.0f;
}else{
scale<i>=inputScalexWeight*quantizedWeightScale<i>/outputScale[0];
}
}
if(bias){
for(int i=0;i<outputChannels;++i){
if(fabs(inputScalexWeight)<=1e-6||fabs(quantizedWeightScale<i>)<=1e-6){
quantizedBias<i>=0;
}else{
quantizedBias<i>=static_cast<int32_t>(bias<i>/(inputScalexWeight*quantizedWeightScale<i>));
}
}
}
return 0;
}