Develop an API interface for the quantitative spot contract copy-trading system: design an interface layer that communicates with exchange APIs. It must be built according to each exchange API's specifications and requirements, and must match the exchange API's interface parameters and data formats exactly.
Implement risk management functions: add risk management capabilities to the quantitative spot contract copy-trading system, such as position management, risk control, and stop-loss/take-profit. This requires interacting with the exchange API to monitor market risk in real time and to take the corresponding risk-mitigation actions.
实现数据分析与决策支持功能:在量化现货合约跟单系统中实现数据分析与决策支持功能,例如用户行为分析、市场分析和交易预测等。这需要从交易所API获取相关的数据,并进行分析和挖掘,以便制定更有效的交易策略和决策
获取交易所API:首先,需要获取交易所的API,以实现量化现货合约跟单系统与交易所的交互。
实现数据传输和交互功能:通过量化现货合约跟单系统API接口实现与交易所API的数据传输和交互功能。这包括数字资产交易相关的数据,例如行情数据、交易数据等。
实现交易功能:在量化现货合约跟单系统中实现交易功能,包括下单、撤单等。这需要与交易所API进行交互,确保交易指令能够成功发送到交易所。
// Rewrite quantizable ops in the model in place, attaching calibrated scales:
//  - Eltwise SUM        -> EltwiseInt8 (input/output scale params)
//  - Convolution        -> ConvInt8    (per-channel quantized weights/bias)
//  - ConvolutionDepthwise -> DepthwiseConvInt8
// Ops listed in _skip_quant_ops, and all other op types, are left untouched.
void Calibration::_updateScale() {
    for (const auto& op : _originaleModel->oplists) {
        // Skip ops the user explicitly excluded from quantization.
        auto iter = std::find(_skip_quant_ops.begin(), _skip_quant_ops.end(), op->name);
        if (iter != _skip_quant_ops.end()) {
            continue;
        }
        const auto opType = op->type;
        // Only Convolution, ConvolutionDepthwise and Eltwise are handled here.
        if (opType != MNN::OpType_Convolution && opType != MNN::OpType_ConvolutionDepthwise &&
            opType != MNN::OpType_Eltwise) {
            continue;
        }
        auto tensorsPair = _opInfo.find(op->name);
        if (tensorsPair == _opInfo.end()) {
            // FIX: format string was garbled ("for%sn") and, worse, the original
            // fell through and dereferenced the end iterator below (UB).
            MNN_ERROR("Can't find tensors for %s\n", op->name.c_str());
            continue;
        }
        if (opType == MNN::OpType_Eltwise) {
            auto param = op->main.AsEltwise();
            // Now only support AddInt8: other eltwise types stay in float.
            if (param->type != MNN::EltwiseType_SUM) {
                continue;
            }
            const auto& inputScale0 = _scales[tensorsPair->second.first[0]];
            const auto& inputScale1 = _scales[tensorsPair->second.first[1]];
            const auto& outputScale = _scales[tensorsPair->second.second[0]];
            const int outputScaleSize = outputScale.size();
            // The int8 kernel multiplies by the inverse of the output scale.
            std::vector<float> outputInvertScale(outputScaleSize);
            Helper::invertData(outputInvertScale.data(), outputScale.data(), outputScaleSize);
            // Replace the float Eltwise op with its int8 counterpart.
            op->type = MNN::OpType_EltwiseInt8;
            op->main.Reset();
            op->main.type = MNN::OpParameter_EltwiseInt8;
            auto eltwiseInt8Param = new MNN::EltwiseInt8T;
            // Ownership of each QuantizedFloatParamT passes to the unique_ptr
            // members immediately; eltwiseInt8Param itself is owned by op->main.
            eltwiseInt8Param->inputQuan0.reset(new MNN::QuantizedFloatParamT);
            eltwiseInt8Param->inputQuan0->tensorScale = inputScale0;
            eltwiseInt8Param->inputQuan1.reset(new MNN::QuantizedFloatParamT);
            eltwiseInt8Param->inputQuan1->tensorScale = inputScale1;
            eltwiseInt8Param->outputQuan.reset(new MNN::QuantizedFloatParamT);
            eltwiseInt8Param->outputQuan->tensorScale = outputInvertScale;
            op->main.value = eltwiseInt8Param;
            continue;
        }
        // Below is Conv/DepthwiseConv.
        const auto& inputScale  = _scales[tensorsPair->second.first[0]];
        const auto& outputScale = _scales[tensorsPair->second.second[0]];
        auto param = op->main.AsConvolution2D();
        const int channels   = param->common->outputCount;
        const int weightSize = param->weight.size();
        param->symmetricQuan.reset(new MNN::QuantizedFloatParamT);
        // quantizedParam is a reference to param->symmetricQuan.
        auto& quantizedParam = param->symmetricQuan;
        quantizedParam->scale.resize(channels);
        quantizedParam->weight.resize(weightSize);
        quantizedParam->bias.resize(channels);
        if (opType == MNN::OpType_Convolution) {
            QuantizeConvPerChannel(param->weight.data(), param->weight.size(), param->bias.data(),
                                   quantizedParam->weight.data(), quantizedParam->bias.data(),
                                   quantizedParam->scale.data(), inputScale, outputScale,
                                   _weightQuantizeMethod, _weightClampValue);
            op->type = MNN::OpType_ConvInt8;
        } else if (opType == MNN::OpType_ConvolutionDepthwise) {
            QuantizeDepthwiseConv(param->weight.data(), param->weight.size(), param->bias.data(),
                                  quantizedParam->weight.data(), quantizedParam->bias.data(),
                                  quantizedParam->scale.data(), inputScale, outputScale,
                                  _weightQuantizeMethod, _weightClampValue);
            op->type = MNN::OpType_DepthwiseConvInt8;
        }
        // Int8 kernels fold relu6 into relu (clamping handled by quantization range).
        if (param->common->relu6) {
            param->common->relu  = true;
            param->common->relu6 = false;
        }
        // Drop the original float weights and bias: the quantized copies in
        // symmetricQuan are now the source of truth.
        param->weight.clear();
        param->bias.clear();
    }
}