MySQL Exporter Source Code Analysis

This article analyzes the overall structure of MySQL Exporter, implements a custom demo collector, and looks at how the collection scopes are put together.

1. Entry point (the main function)

As the code below shows, MySQL Exporter exposes two URLs: / serves a small landing page with basic information, and /metrics is where metrics are scraped. The handler behind /metrics is built by newHandler.
If you need to integrate node_exporter into the same process, this handler is the place to do it; see my earlier article for the details of that integration.

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    "github.com/go-sql-driver/mysql"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/prometheus/common/promlog"
    "github.com/prometheus/common/promlog/flag"
    "github.com/prometheus/common/version"
    nodeCollector "github.com/prometheus/mysqld_exporter/cmd/collector"
    "gopkg.in/alecthomas/kingpin.v2"
    "gopkg.in/ini.v1"

    "github.com/prometheus/mysqld_exporter/collector"
)

var (
    listenAddress = kingpin.Flag(
        "web.listen-address",
        "Address to listen on for web interface and telemetry.",
    ).Default(":9100").String()
    metricPath = kingpin.Flag(
        "web.telemetry-path",
        "Path under which to expose metrics.",
    ).Default("/metrics").String()
    timeoutOffset = kingpin.Flag(
        "timeout-offset",
        "Offset to subtract from timeout in seconds.",
    ).Default("0.25").Float64()
    configMycnf = kingpin.Flag(
        "config.my-cnf",
        "Path to .my.cnf file to read MySQL credentials from.",
    ).Default(path.Join(os.Getenv("HOME"), ".my.cnf")).String()
    tlsInsecureSkipVerify = kingpin.Flag(
        "tls.insecure-skip-verify",
        "Ignore certificate and server verification when using a tls connection.",
    ).Bool()
    dsn string
)

// scrapers lists all possible collection methods and if they should be enabled by default.
var scrapers = map[collector.Scraper]bool{
    collector.ScrapeGlobalStatus{}:                        true,
    collector.ScrapeGlobalVariables{}:                     true,
    collector.ScrapeSlaveStatus{}:                         true,
    collector.ScrapeProcesslist{}:                         false,
    collector.ScrapeUser{}:                                false,
    collector.ScrapeTableSchema{}:                         false,
    collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
    collector.ScrapeInnodbMetrics{}:                       false,
    collector.ScrapeAutoIncrementColumns{}:                false,
    collector.ScrapeBinlogSize{}:                          false,
    collector.ScrapePerfTableIOWaits{}:                    false,
    collector.ScrapePerfIndexIOWaits{}:                    false,
    collector.ScrapePerfTableLockWaits{}:                  false,
    collector.ScrapePerfEventsStatements{}:                false,
    collector.ScrapePerfEventsStatementsSum{}:             false,
    collector.ScrapePerfEventsWaits{}:                     false,
    collector.ScrapePerfFileEvents{}:                      false,
    collector.ScrapePerfFileInstances{}:                   false,
    collector.ScrapePerfMemoryEvents{}:                    false,
    collector.ScrapePerfReplicationGroupMembers{}:         false,
    collector.ScrapePerfReplicationGroupMemberStats{}:     false,
    collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
    collector.ScrapeUserStat{}:                            false,
    collector.ScrapeClientStat{}:                          false,
    collector.ScrapeTableStat{}:                           false,
    collector.ScrapeSchemaStat{}:                          false,
    collector.ScrapeInnodbCmp{}:                           true,
    collector.ScrapeInnodbCmpMem{}:                        true,
    collector.ScrapeQueryResponseTime{}:                   true,
    collector.ScrapeEngineTokudbStatus{}:                  false,
    collector.ScrapeEngineInnodbStatus{}:                  false,
    collector.ScrapeHeartbeat{}:                           false,
    collector.ScrapeSlaveHosts{}:                          false,
    collector.ScrapeReplicaHost{}:                         false,
    collector.ScrapeRdsMysqlDemo2{}:                       true,
}

func parseMycnf(config interface{}) (string, error) {
    var dsn string
    opts := ini.LoadOptions{
        // MySQL ini file can have boolean keys.
        AllowBooleanKeys: true,
    }
    cfg, err := ini.LoadSources(opts, config)
    if err != nil {
        return dsn, fmt.Errorf("failed reading ini file: %s", err)
    }
    user := cfg.Section("client").Key("user").String()
    password := cfg.Section("client").Key("password").String()
    if user == "" {
        return dsn, fmt.Errorf("no user specified under [client] in %s", config)
    }
    host := cfg.Section("client").Key("host").MustString("localhost")
    port := cfg.Section("client").Key("port").MustUint(3306)
    socket := cfg.Section("client").Key("socket").String()
    sslCA := cfg.Section("client").Key("ssl-ca").String()
    sslCert := cfg.Section("client").Key("ssl-cert").String()
    sslKey := cfg.Section("client").Key("ssl-key").String()
    passwordPart := ""
    if password != "" {
        passwordPart = ":" + password
    } else {
        if sslKey == "" {
            return dsn, fmt.Errorf("password or ssl-key should be specified under [client] in %s", config)
        }
    }
    if socket != "" {
        dsn = fmt.Sprintf("%s%s@unix(%s)/", user, passwordPart, socket)
    } else {
        dsn = fmt.Sprintf("%s%s@tcp(%s:%d)/", user, passwordPart, host, port)
    }
    if sslCA != "" {
        if tlsErr := customizeTLS(sslCA, sslCert, sslKey); tlsErr != nil {
            tlsErr = fmt.Errorf("failed to register a custom TLS configuration for mysql dsn: %s", tlsErr)
            return dsn, tlsErr
        }
        dsn = fmt.Sprintf("%s?tls=custom", dsn)
    }

    return dsn, nil
}

func customizeTLS(sslCA string, sslCert string, sslKey string) error {
    var tlsCfg tls.Config
    caBundle := x509.NewCertPool()
    pemCA, err := ioutil.ReadFile(sslCA)
    if err != nil {
        return err
    }
    if ok := caBundle.AppendCertsFromPEM(pemCA); ok {
        tlsCfg.RootCAs = caBundle
    } else {
        return fmt.Errorf("failed parse pem-encoded CA certificates from %s", sslCA)
    }
    if sslCert != "" && sslKey != "" {
        certPairs := make([]tls.Certificate, 0, 1)
        keypair, err := tls.LoadX509KeyPair(sslCert, sslKey)
        if err != nil {
            return fmt.Errorf("failed to parse pem-encoded SSL cert %s or SSL key %s: %s",
                sslCert, sslKey, err)
        }
        certPairs = append(certPairs, keypair)
        tlsCfg.Certificates = certPairs
        tlsCfg.InsecureSkipVerify = *tlsInsecureSkipVerify
    }
    mysql.RegisterTLSConfig("custom", &tlsCfg)
    return nil
}

func init() {
    prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}

func newHandler(metrics collector.Metrics, scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        filteredScrapers := scrapers
        params := r.URL.Query()["collect[]"]
        // Use request context for cancellation when connection gets closed.
        ctx := r.Context()
        // If a timeout is configured via the Prometheus header, add it to the context.
        if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
            timeoutSeconds, err := strconv.ParseFloat(v, 64)
            if err != nil {
                level.Error(logger).Log("msg", "Failed to parse timeout from Prometheus header", "err", err)
            } else {
                if *timeoutOffset >= timeoutSeconds {
                    // Ignore timeout offset if it doesn't leave time to scrape.
                    level.Error(logger).Log("msg", "Timeout offset should be lower than prometheus scrape timeout", "offset", *timeoutOffset, "prometheus_scrape_timeout", timeoutSeconds)
                } else {
                    // Subtract timeout offset from timeout.
                    timeoutSeconds -= *timeoutOffset
                }
                // Create new timeout context with request context as parent.
                var cancel context.CancelFunc
                ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
                defer cancel()
                // Overwrite request with timeout context.
                r = r.WithContext(ctx)
            }
        }
        level.Debug(logger).Log("msg", "collect[] params", "params", strings.Join(params, ","))

        // Check if we have some "collect[]" query parameters.
        if len(params) > 0 {
            filters := make(map[string]bool)
            for _, param := range params {
                filters[param] = true
            }

            filteredScrapers = nil
            for _, scraper := range scrapers {
                if filters[scraper.Name()] {
                    filteredScrapers = append(filteredScrapers, scraper)
                }
            }
        }

        registry := prometheus.NewRegistry()
        registry.MustRegister(collector.New(ctx, dsn, metrics, filteredScrapers, logger))

        // add node_exporter start
        nc, err := nodeCollector.NewNodeCollector(logger)
        if err != nil {
            level.Error(logger).Log("msg", "couldn't create node collector", "err", err)
        }
        registry.MustRegister(nc)
        // add node_exporter end

        gatherers := prometheus.Gatherers{
            prometheus.DefaultGatherer,
            registry,
        }
        // Delegate http serving to Prometheus client library, which will call collector.Collect.
        h := promhttp.HandlerFor(gatherers, promhttp.HandlerOpts{})
        h.ServeHTTP(w, r)
    }
}

func main() {
    // Generate ON/OFF flags for all scrapers.
    scraperFlags := map[collector.Scraper]*bool{}
    for scraper, enabledByDefault := range scrapers {
        defaultOn := "false"
        if enabledByDefault {
            defaultOn = "true"
        }

        f := kingpin.Flag(
            "collect."+scraper.Name(),
            scraper.Help(),
        ).Default(defaultOn).Bool()

        scraperFlags[scraper] = f
    }

    // Parse flags.
    promlogConfig := &promlog.Config{}
    flag.AddFlags(kingpin.CommandLine, promlogConfig)
    kingpin.Version(version.Print("mysqld_exporter"))
    kingpin.HelpFlag.Short('h')
    kingpin.Parse()
    logger := promlog.New(promlogConfig)

    // landingPage contains the HTML served at '/'.
    // TODO: Make this nicer and more informative.
    var landingPage = []byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`)

    level.Info(logger).Log("msg", "Starting msqyld_exporter", "version", version.Info())
    level.Info(logger).Log("msg", "Build context", version.BuildContext())

    dsn = os.Getenv("DATA_SOURCE_NAME")
    if len(dsn) == 0 {
        var err error
        if dsn, err = parseMycnf(*configMycnf); err != nil {
            level.Info(logger).Log("msg", "Error parsing my.cnf", "file", *configMycnf, "err", err)
            os.Exit(1)
        }
    }

    // Register only scrapers enabled by flag.
    enabledScrapers := []collector.Scraper{}
    for scraper, enabled := range scraperFlags {
        if *enabled {
            level.Info(logger).Log("msg", "Scraper enabled", "scraper", scraper.Name())
            enabledScrapers = append(enabledScrapers, scraper)
        }
    }
    handlerFunc := newHandler(collector.NewMetrics(), enabledScrapers, logger)
    http.Handle(*metricPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write(landingPage)
    })

    level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress)
    if err := http.ListenAndServe(*listenAddress, nil); err != nil {
        level.Error(logger).Log("msg", "Error starting HTTP server", "err", err)
        os.Exit(1)
    }
}

2. Interface implementation

The key point is that registry.MustRegister requires its argument to implement the Collector interface; in other words, every time metrics need to be gathered, the Collect method of that interface is invoked:

type Collector interface {
    Describe(chan<- *Desc)
    Collect(chan<- Metric)
}

It is not hard to see that the collector scrapes all of its metrics concurrently, and that every concrete metric implements the Scraper interface:

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package collector

import (
    "context"
    "database/sql"

    "github.com/go-kit/kit/log"
    _ "github.com/go-sql-driver/mysql"
    "github.com/prometheus/client_golang/prometheus"
)

// Scraper is a minimal interface that lets you add new prometheus metrics to mysqld_exporter.
type Scraper interface {
    // Name of the Scraper. Should be unique.
    Name() string

    // Help describes the role of the Scraper.
    // Example: "Collect from SHOW ENGINE INNODB STATUS"
    Help() string

    // Version of MySQL from which scraper is available.
    Version() float64

    // Scrape collects data from database connection and sends it over channel as prometheus metric.
    Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error
}
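
The Exporter value returned by collector.New (in collector/exporter.go) is what ties these two interfaces together: its Collect method opens a database connection and fans all enabled scrapers out concurrently, each one pushing metrics onto the channel. The following is only a simplified sketch of that pattern; the sketchExporter type and its fields are abbreviations for this article, not the exporter's real code, which additionally compares the server version against Scraper.Version() and records per-scraper duration and success metrics.

package collector

import (
    "context"
    "database/sql"
    "sync"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    _ "github.com/go-sql-driver/mysql"
    "github.com/prometheus/client_golang/prometheus"
)

// sketchExporter is a stripped-down stand-in for the Exporter built by collector.New.
type sketchExporter struct {
    ctx      context.Context
    dsn      string
    scrapers []Scraper
    logger   log.Logger
}

// Collect opens the connection and runs every configured scraper in its own
// goroutine, waiting for all of them before returning (Describe omitted here).
func (e *sketchExporter) Collect(ch chan<- prometheus.Metric) {
    db, err := sql.Open("mysql", e.dsn)
    if err != nil {
        level.Error(e.logger).Log("msg", "error opening connection to database", "err", err)
        return
    }
    defer db.Close()

    var wg sync.WaitGroup
    for _, s := range e.scrapers {
        wg.Add(1)
        go func(s Scraper) {
            defer wg.Done()
            // Each scraper runs its own queries and pushes metrics onto ch.
            if err := s.Scrape(e.ctx, db, ch, e.logger); err != nil {
                level.Error(e.logger).Log("msg", "scraper failed", "scraper", s.Name(), "err", err)
            }
        }(s)
    }
    wg.Wait()
}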

That makes things straightforward: to collect a new metric we only have to implement this interface. The concrete work lives in Scrape, where we query the database and pull out the values we need by whatever means fit, such as text parsing or regular expressions. Let's implement a simple collector:

// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Scrape `mysql.user test`.

package collector

import (
    "context"
    "database/sql"

    "github.com/go-kit/kit/log"
    "github.com/prometheus/client_golang/prometheus"
)

// Metric descriptors.
var (
    RdsDemo2Desc = prometheus.NewDesc(
        prometheus.BuildFQName(namespace, "demo", "rds_demo2_test"),
        "this is rds_demo2 test",
        nil, nil)
)

// ScrapeRdsMysqlDemo2 is a minimal demo scraper that emits a single constant metric.
type ScrapeRdsMysqlDemo2 struct{}

// Name of the Scraper. Should be unique.
func (ScrapeRdsMysqlDemo2) Name() string {
    return "ScrapeRdsMysqlDemo2"
}

// Help describes the role of the Scraper.
func (ScrapeRdsMysqlDemo2) Help() string {
    return " 1 Collect data from mysql.user"
}

// Version of MySQL from which scraper is available.
func (ScrapeRdsMysqlDemo2) Version() float64 {
    return 8.0
}

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapeRdsMysqlDemo2) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {

    ch <- prometheus.MustNewConstMetric(RdsDemo2Desc, prometheus.GaugeValue, float64(11))

    return nil
}

// check interface
var _ Scraper = ScrapeRdsMysqlDemo2{}
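
The Scrape above only pushes a constant value. A scraper that actually reads from MySQL follows exactly the same shape; below is a hedged sketch that counts the rows in mysql.user (the query, the scrapeUserCount helper, the mysql_user_count metric name, and the "demo" subsystem are illustrative choices, not part of the original demo).

package collector

import (
    "context"
    "database/sql"

    "github.com/prometheus/client_golang/prometheus"
)

// rdsDemoUserCountDesc describes the illustrative metric produced below.
var rdsDemoUserCountDesc = prometheus.NewDesc(
    prometheus.BuildFQName(namespace, "demo", "mysql_user_count"),
    "Number of rows in mysql.user (illustrative example).",
    nil, nil)

// scrapeUserCount runs a real query and turns the result into a metric.
func scrapeUserCount(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
    var count int64
    // QueryRowContext respects the scrape timeout carried in ctx.
    if err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM mysql.user").Scan(&count); err != nil {
        return err
    }
    ch <- prometheus.MustNewConstMetric(rdsDemoUserCountDesc, prometheus.GaugeValue, float64(count))
    return nil
}

Wiring it in is then a one-line change inside Scrape: return scrapeUserCount(ctx, db, ch).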

3. Collection sets

From the code above we already know how mysqld_exporter gathers metrics: each collector is just another implementation of the Scraper interface, so extending the exporter is easy. It helps to describe the monitoring scope in terms of sets. First, the exporter records a default collection scope, set A, in the scrapers variable:


// scrapers lists all possible collection methods and if they should be enabled by default.
var scrapers = map[collector.Scraper]bool{
    collector.ScrapeGlobalStatus{}:                        true,
    collector.ScrapeGlobalVariables{}:                     true,
    collector.ScrapeSlaveStatus{}:                         true,
    collector.ScrapeProcesslist{}:                         false,
    collector.ScrapeUser{}:                                false,
    collector.ScrapeTableSchema{}:                         false,
    collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
    collector.ScrapeInnodbMetrics{}:                       false,
    collector.ScrapeAutoIncrementColumns{}:                false,
    collector.ScrapeBinlogSize{}:                          false,
    collector.ScrapePerfTableIOWaits{}:                    false,
    collector.ScrapePerfIndexIOWaits{}:                    false,
    collector.ScrapePerfTableLockWaits{}:                  false,
    collector.ScrapePerfEventsStatements{}:                false,
    collector.ScrapePerfEventsStatementsSum{}:             false,
    collector.ScrapePerfEventsWaits{}:                     false,
    collector.ScrapePerfFileEvents{}:                      false,
    collector.ScrapePerfFileInstances{}:                   false,
    collector.ScrapePerfMemoryEvents{}:                    false,
    collector.ScrapePerfReplicationGroupMembers{}:         false,
    collector.ScrapePerfReplicationGroupMemberStats{}:     false,
    collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
    collector.ScrapeUserStat{}:                            false,
    collector.ScrapeClientStat{}:                          false,
    collector.ScrapeTableStat{}:                           false,
    collector.ScrapeSchemaStat{}:                          false,
    collector.ScrapeInnodbCmp{}:                           true,
    collector.ScrapeInnodbCmpMem{}:                        true,
    collector.ScrapeQueryResponseTime{}:                   true,
    collector.ScrapeEngineTokudbStatus{}:                  false,
    collector.ScrapeEngineInnodbStatus{}:                  false,
    collector.ScrapeHeartbeat{}:                           false,
    collector.ScrapeSlaveHosts{}:                          false,
    collector.ScrapeReplicaHost{}:                         false,
    collector.ScrapeRdsMysqlDemo2{}:                       true, // our own simple demo implementation
}

The exporter also lets you set a collection scope B at startup through command-line flags (one --collect.<name> flag is generated per scraper). When set B is not given, set A applies; when set B is given, it takes effect and set A no longer does. When Prometheus scrapes the exporter, it can additionally pass collect[] parameters that define a scope C. If set C is absent, the effective scrape scope is A or B (whichever is in effect); if set C is present, the effective scope is the intersection of C with A or B (whichever is in effect).
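
To make set C concrete: the collect[] values are ordinary repeated URL query parameters whose values are the scrapers' Name() strings (in practice you would put them in the params section of a Prometheus scrape job). The small Go sketch below shows the kind of URL the handler ends up parsing; the host, port, and the two scraper names chosen here are only examples, with ScrapeRdsMysqlDemo2 being the Name() of the demo scraper above.

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Set C: ask the exporter for just two collectors by name.
    q := url.Values{}
    q.Add("collect[]", "global_status")
    q.Add("collect[]", "ScrapeRdsMysqlDemo2")

    u := url.URL{
        Scheme:   "http",
        Host:     "localhost:9100",
        Path:     "/metrics",
        RawQuery: q.Encode(),
    }
    // newHandler reads these back with r.URL.Query()["collect[]"]
    // and intersects them with the scrapers enabled at startup.
    fmt.Println(u.String())
}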
