From 0c6eac64c7d63d1fdf6fa78724b817f03e5d7454 Mon Sep 17 00:00:00 2001 From: Balasubramanian Kandasamy Date: Mon, 8 Aug 2016 15:15:17 +0530 Subject: [PATCH 01/44] Raise version number after cloning 5.5.52 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index acabf9b42d0ca..d44c8b2800612 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=5 MYSQL_VERSION_MINOR=5 -MYSQL_VERSION_PATCH=52 +MYSQL_VERSION_PATCH=53 MYSQL_VERSION_EXTRA= From 213765cc222139c05c27774e0555cabeff7c3bbd Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Mon, 5 Sep 2016 13:18:04 +0200 Subject: [PATCH 02/44] - Fix MDEV-10496. Memory leak in discovery modified: storage/connect/ha_connect.cc modified: storage/connect/mycat.cc - Fix wrong lrecl calculation for virtual columns modified: storage/connect/reldef.cpp - Typo modified: storage/connect/jdbconn.cpp modified: storage/connect/json.cpp --- storage/connect/ha_connect.cc | 84 ++++++++------- storage/connect/jdbconn.cpp | 195 +--------------------------------- storage/connect/json.cpp | 6 +- storage/connect/mycat.cc | 14 +-- storage/connect/reldef.cpp | 2 +- 5 files changed, 54 insertions(+), 247 deletions(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index ea6fb1b08c16d..cf945a73f4610 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -224,6 +224,7 @@ uint GetWorkSize(void); void SetWorkSize(uint); extern "C" const char *msglang(void); +static void PopUser(PCONNECT xp); static PCONNECT GetUser(THD *thd, PCONNECT xp); static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp); @@ -831,34 +832,43 @@ ha_connect::~ha_connect(void) table ? table->s->table_name.str : "", xp, xp ? 
xp->count : 0); - if (xp) { - PCONNECT p; + PopUser(xp); +} // end of ha_connect destructor - xp->count--; - for (p= user_connect::to_users; p; p= p->next) - if (p == xp) - break; +/****************************************************************************/ +/* Check whether this user can be removed. */ +/****************************************************************************/ +static void PopUser(PCONNECT xp) +{ + if (xp) { + xp->count--; - if (p && !p->count) { - if (p->next) - p->next->previous= p->previous; + if (!xp->count) { + PCONNECT p; - if (p->previous) - p->previous->next= p->next; - else - user_connect::to_users= p->next; + for (p= user_connect::to_users; p; p= p->next) + if (p == xp) + break; - } // endif p + if (p) { + if (p->next) + p->next->previous= p->previous; - if (!xp->count) { - PlugCleanup(xp->g, true); - delete xp; - } // endif count + if (p->previous) + p->previous->next= p->next; + else + user_connect::to_users= p->next; - } // endif xp + } // endif p -} // end of ha_connect destructor + PlugCleanup(xp->g, true); + delete xp; + } // endif count + + } // endif xp + +} // end of PopUser /****************************************************************************/ @@ -866,7 +876,7 @@ ha_connect::~ha_connect(void) /****************************************************************************/ static PCONNECT GetUser(THD *thd, PCONNECT xp) { - if (!thd) + if (!thd) return NULL; if (xp && thd == xp->thdp) @@ -890,7 +900,6 @@ static PCONNECT GetUser(THD *thd, PCONNECT xp) return xp; } // end of GetUser - /****************************************************************************/ /* Get the global pointer of the user of this handler. 
*/ /****************************************************************************/ @@ -5261,7 +5270,18 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (!(shm= (char*)db)) db= table_s->db.str; // Default value - // Check table type + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + goto jer; + } // endif jump_level + + if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + goto err; + } // endif rc + + // Check table type if (ttp == TAB_UNDEF) { topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; ttp= GetTypeID(topt->type); @@ -5270,20 +5290,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else if (ttp == TAB_NIY) { sprintf(g->Message, "Unsupported table type %s", topt->type); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - return HA_ERR_INTERNAL_ERROR; + goto err; } // endif ttp - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return HA_ERR_INTERNAL_ERROR; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif rc - if (!tab) { if (ttp == TAB_TBL) { // Make tab the first table of the list @@ -5843,6 +5852,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, rc= init_table_share(thd, table_s, create_info, &sql); g->jump_level--; + PopUser(xp); return rc; } // endif ok @@ -5850,7 +5860,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, err: g->jump_level--; - return HA_ERR_INTERNAL_ERROR; + jer: + PopUser(xp); + return HA_ERR_INTERNAL_ERROR; } // end of connect_assisted_discovery /** diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 3b8de3e975bfb..952847507a0b3 100644 --- a/storage/connect/jdbconn.cpp +++ 
b/storage/connect/jdbconn.cpp @@ -498,145 +498,6 @@ PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info) return qrp; } // end of JDBCDrivers -#if 0 -/*************************************************************************/ -/* JDBCDataSources: constructs the result blocks containing all JDBC */ -/* data sources available on the local host. */ -/* Called with info=true to have result column names. */ -/*************************************************************************/ -PQRYRES JDBCDataSources(PGLOBAL g, int maxres, bool info) -{ - int buftyp[] ={ TYPE_STRING, TYPE_STRING }; - XFLD fldtyp[] ={ FLD_NAME, FLD_REM }; - unsigned int length[] ={ 0, 256 }; - bool b[] ={ false, true }; - int i, n = 0, ncol = 2; - PCOLRES crp; - PQRYRES qrp; - JDBConn *jcp = NULL; - - /************************************************************************/ - /* Do an evaluation of the result size. */ - /************************************************************************/ - if (!info) { - jcp = new(g)JDBConn(g, NULL); - n = jcp->GetMaxValue(SQL_MAX_DSN_LENGTH); - length[0] = (n) ? (n + 1) : 256; - - if (!maxres) - maxres = 512; // Estimated max number of data sources - - } else { - length[0] = 256; - maxres = 0; - } // endif info - - if (trace) - htrc("JDBCDataSources: max=%d len=%d\n", maxres, length[0]); - - /************************************************************************/ - /* Allocate the structures used to refer to the result set. */ - /************************************************************************/ - qrp = PlgAllocResult(g, ncol, maxres, IDS_DSRC, - buftyp, fldtyp, length, false, true); - - for (i = 0, crp = qrp->Colresp; crp; i++, crp = crp->Next) - if (b[i]) - crp->Kdata->SetNullable(true); - - /************************************************************************/ - /* Now get the results into blocks. 
*/ - /************************************************************************/ - if (!info && qrp && jcp->GetDataSources(qrp)) - qrp = NULL; - - /************************************************************************/ - /* Return the result pointer for use by GetData routines. */ - /************************************************************************/ - return qrp; -} // end of JDBCDataSources - -/**************************************************************************/ -/* PrimaryKeys: constructs the result blocks containing all the */ -/* JDBC catalog information concerning primary keys. */ -/**************************************************************************/ -PQRYRES JDBCPrimaryKeys(PGLOBAL g, JDBConn *op, char *dsn, char *table) -{ - static int buftyp[] ={ TYPE_STRING, TYPE_STRING, TYPE_STRING, - TYPE_STRING, TYPE_SHORT, TYPE_STRING }; - static unsigned int length[] ={ 0, 0, 0, 0, 6, 128 }; - int n, ncol = 5; - int maxres; - PQRYRES qrp; - JCATPARM *cap; - JDBConn *jcp = op; - - if (!op) { - /**********************************************************************/ - /* Open the connection with the JDBC data source. */ - /**********************************************************************/ - jcp = new(g)JDBConn(g, NULL); - - if (jcp->Open(dsn, 2) < 1) // 2 is openReadOnly - return NULL; - - } // endif op - - /************************************************************************/ - /* Do an evaluation of the result size. */ - /************************************************************************/ - n = jcp->GetMaxValue(SQL_MAX_COLUMNS_IN_TABLE); - maxres = (n) ? (int)n : 250; - n = jcp->GetMaxValue(SQL_MAX_CATALOG_NAME_LEN); - length[0] = (n) ? (n + 1) : 128; - n = jcp->GetMaxValue(SQL_MAX_SCHEMA_NAME_LEN); - length[1] = (n) ? (n + 1) : 128; - n = jcp->GetMaxValue(SQL_MAX_TABLE_NAME_LEN); - length[2] = (n) ? (n + 1) : 128; - n = jcp->GetMaxValue(SQL_MAX_COLUMN_NAME_LEN); - length[3] = (n) ? 
(n + 1) : 128; - - if (trace) - htrc("JDBCPrimaryKeys: max=%d len=%d,%d,%d\n", - maxres, length[0], length[1], length[2]); - - /************************************************************************/ - /* Allocate the structure used to refer to the result set. */ - /************************************************************************/ - qrp = PlgAllocResult(g, ncol, maxres, IDS_PKEY, - buftyp, NULL, length, false, true); - - if (trace) - htrc("Getting pkey results ncol=%d\n", qrp->Nbcol); - - cap = AllocCatInfo(g, CAT_KEY, NULL, table, qrp); - - /************************************************************************/ - /* Now get the results into blocks. */ - /************************************************************************/ - if ((n = jcp->GetCatInfo(cap)) >= 0) { - qrp->Nblin = n; - // ResetNullValues(cap); - - if (trace) - htrc("PrimaryKeys: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); - - } else - qrp = NULL; - - /************************************************************************/ - /* Close any local connection. */ - /************************************************************************/ - if (!op) - jcp->Close(); - - /************************************************************************/ - /* Return the result pointer for use by GetData routines. */ - /************************************************************************/ - return qrp; -} // end of JDBCPrimaryKeys -#endif // 0 - /***********************************************************************/ /* JDBConn construction/destruction. */ /***********************************************************************/ @@ -739,60 +600,6 @@ bool JDBConn::gmID(PGLOBAL g, jmethodID& mid, const char *name, const char *sig } // end of gmID -#if 0 -/***********************************************************************/ -/* Utility routine. 
*/ -/***********************************************************************/ -PSZ JDBConn::GetStringInfo(ushort infotype) -{ - //ASSERT(m_hdbc != SQL_NULL_HDBC); - char *p, buffer[MAX_STRING_INFO]; - SWORD result; - RETCODE rc; - - rc = SQLGetInfo(m_hdbc, infotype, buffer, sizeof(buffer), &result); - - if (!Check(rc)) { - ThrowDJX(rc, "SQLGetInfo"); // Temporary - // *buffer = '\0'; - } // endif rc - - p = PlugDup(m_G, buffer); - return p; -} // end of GetStringInfo - -/***********************************************************************/ -/* Utility routines. */ -/***********************************************************************/ -void JDBConn::OnSetOptions(HSTMT hstmt) -{ - RETCODE rc; - ASSERT(m_hdbc != SQL_NULL_HDBC); - - if ((signed)m_QueryTimeout != -1) { - // Attempt to set query timeout. Ignore failure - rc = SQLSetStmtOption(hstmt, SQL_QUERY_TIMEOUT, m_QueryTimeout); - - if (!Check(rc)) - // don't attempt it again - m_QueryTimeout = (DWORD)-1; - - } // endif m_QueryTimeout - - if (m_RowsetSize > 0) { - // Attempt to set rowset size. - // In case of failure reset it to 0 to use Fetch. - rc = SQLSetStmtOption(hstmt, SQL_ROWSET_SIZE, m_RowsetSize); - - if (!Check(rc)) - // don't attempt it again - m_RowsetSize = 0; - - } // endif m_RowsetSize - -} // end of OnSetOptions -#endif // 0 - /***********************************************************************/ /* Utility routine. 
*/ /***********************************************************************/ @@ -1007,7 +814,7 @@ int JDBConn::Open(PJPARM sop) #define N 1 #endif - // Java source will be compiled as ajar file installed in the plugin dir + // Java source will be compiled as a jar file installed in the plugin dir jpop->Append(sep); jpop->Append(GetPluginDir()); jpop->Append("JdbcInterface.jar"); diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 75bf277b25bf5..c45630129f120 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -595,7 +595,7 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) fputs(EL, fs); fclose(fs); str = (err) ? NULL : strcpy(g->Message, "Ok"); - } else if (!err) { + } else if (!err) { str = ((JOUTSTR*)jp)->Strp; jp->WriteChr('\0'); PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); @@ -767,7 +767,7 @@ bool JOUTSTR::Escape(const char *s) { WriteChr('"'); - for (unsigned int i = 0; i < strlen(s); i++) + for (unsigned int i = 0; s[i]; i++) switch (s[i]) { case '"': case '\\': @@ -816,7 +816,7 @@ bool JOUTFILE::Escape(const char *s) // This is temporary fputc('"', Stream); - for (unsigned int i = 0; i < strlen(s); i++) + for (unsigned int i = 0; s[i]; i++) switch (s[i]) { case '"': fputs("\\\"", Stream); break; case '\\': fputs("\\\\", Stream); break; diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index da8be20723796..b4b03e6ba4a22 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -109,19 +109,7 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); /***********************************************************************/ char *GetPluginDir(void) { - char *plugin_dir; - -#if defined(_WIN64) - plugin_dir = (char *)GetProcAddress(GetModuleHandle(NULL), - "?opt_plugin_dir@@3PADEA"); -#elif defined(_WIN32) - plugin_dir = (char*)GetProcAddress(GetModuleHandle(NULL), - "?opt_plugin_dir@@3PADA"); -#else - plugin_dir = opt_plugin_dir; -#endif - - return plugin_dir; + return 
opt_plugin_dir; } // end of GetPluginDir /***********************************************************************/ diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 2c8ada52e6f5c..8ad6e203d519c 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -294,7 +294,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g) nlg+= nof; case TAB_DIR: case TAB_XML: - poff= loff + 1; + poff= loff + (pcf->Flags & U_VIRTUAL ? 0 : 1); break; case TAB_INI: case TAB_MAC: From 7d596c9ff526bc912769490023c44e9a5b2fa743 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 16 Sep 2016 22:14:14 +0200 Subject: [PATCH 03/44] - Working on MDEV-10525. Lrecl mismatch on DBF files modified: storage/connect/filamdbf.cpp modified: storage/connect/filamdbf.h modified: storage/connect/reldef.cpp --- storage/connect/filamdbf.cpp | 86 ++++++++++++++++++++++++++++-------- storage/connect/filamdbf.h | 2 +- storage/connect/reldef.cpp | 6 ++- 3 files changed, 74 insertions(+), 20 deletions(-) diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 8afda72357809..a4557facbd869 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -383,7 +383,7 @@ DBFBASE::DBFBASE(DBFBASE *txfp) /* and header length. Set Records, check that Reclen is equal to lrecl and */ /* return the header length or 0 in case of error. */ /****************************************************************************/ -int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath) +int DBFBASE::ScanHeader(PGLOBAL g, PSZ fn, int lrecl, int *rln, char *defpath) { int rc; char filename[_MAX_PATH]; @@ -393,7 +393,7 @@ int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath) /************************************************************************/ /* Open the input file. 
*/ /************************************************************************/ - PlugSetPath(filename, fname, defpath); + PlugSetPath(filename, fn, defpath); if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb"))) return 0; // Assume file does not exist @@ -410,11 +410,7 @@ int DBFBASE::ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath) } else if (rc == RC_FX) return -1; - if ((int)header.Reclen() != lrecl) { - sprintf(g->Message, MSG(BAD_LRECL), lrecl, header.Reclen()); - return -1; - } // endif Lrecl - + *rln = (int)header.Reclen(); Records = (int)header.Records(); return (int)header.Headlen(); } // end of ScanHeader @@ -431,9 +427,27 @@ int DBFFAM::Cardinality(PGLOBAL g) if (!g) return 1; - if (!Headlen) - if ((Headlen = ScanHeader(g, To_File, Lrecl, Tdbp->GetPath())) < 0) - return -1; // Error in ScanHeader + if (!Headlen) { + int rln = 0; // Record length in the file header + + Headlen = ScanHeader(g, To_File, Lrecl, &rln, Tdbp->GetPath()); + + if (Headlen < 0) + return -1; // Error in ScanHeader + + if (rln && Lrecl != rln) { + // This happens always on some Linux platforms + sprintf(g->Message, MSG(BAD_LRECL), Lrecl, rln); + + if (Accept) { + Lrecl = rln; + PushWarning(g, Tdbp); + } else + return -1; + + } // endif rln + + } // endif Headlen // Set number of blocks for later use Block = (Records > 0) ? 
(Records + Nrec - 1) / Nrec : 0; @@ -565,7 +579,13 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) if (Lrecl != reclen) { sprintf(g->Message, MSG(BAD_LRECL), Lrecl, reclen); - return true; + + if (Accept) { + Lrecl = reclen; + PushWarning(g, Tdbp); + } else + return true; + } // endif Lrecl hlen = HEADLEN * (n + 1) + 2; @@ -641,8 +661,14 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) if ((rc = dbfhead(g, Stream, Tdbp->GetFile(g), &header)) == RC_OK) { if (Lrecl != (int)header.Reclen()) { sprintf(g->Message, MSG(BAD_LRECL), Lrecl, header.Reclen()); - return true; - } // endif Lrecl + + if (Accept) { + Lrecl = header.Reclen(); + PushWarning(g, Tdbp); + } else + return true; + + } // endif Lrecl Records = (int)header.Records(); Headlen = (int)header.Headlen(); @@ -916,9 +942,27 @@ int DBMFAM::Cardinality(PGLOBAL g) if (!g) return 1; - if (!Headlen) - if ((Headlen = ScanHeader(g, To_File, Lrecl, Tdbp->GetPath())) < 0) - return -1; // Error in ScanHeader + if (!Headlen) { + int rln = 0; // Record length in the file header + + Headlen = ScanHeader(g, To_File, Lrecl, &rln, Tdbp->GetPath()); + + if (Headlen < 0) + return -1; // Error in ScanHeader + + if (rln && Lrecl != rln) { + // This happens always on some Linux platforms + sprintf(g->Message, MSG(BAD_LRECL), Lrecl, rln); + + if (Accept) { + Lrecl = rln; + PushWarning(g, Tdbp); + } else + return -1; + + } // endif rln + + } // endif Headlen // Set number of blocks for later use Block = (Records > 0) ? 
(Records + Nrec - 1) / Nrec : 0; @@ -961,8 +1005,14 @@ bool DBMFAM::AllocateBuffer(PGLOBAL g) if (Lrecl != (int)hp->Reclen()) { sprintf(g->Message, MSG(BAD_LRECL), Lrecl, hp->Reclen()); - return true; - } // endif Lrecl + + if (Accept) { + Lrecl = hp->Reclen(); + PushWarning(g, Tdbp); + } else + return true; + + } // endif Lrecl Records = (int)hp->Records(); Headlen = (int)hp->Headlen(); diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index da84d7685a8dd..66458a10eaad9 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -31,7 +31,7 @@ class DllExport DBFBASE { DBFBASE(PDBF txfp); // Implementation - int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, char *defpath); + int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, int *rlen, char *defpath); protected: // Default constructor, not to be used diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 8ad6e203d519c..ac2327212e0fa 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -440,7 +440,11 @@ int TABDEF::GetColCatInfo(PGLOBAL g) } // endswitch tc // lrecl must be at least recln to avoid buffer overflow - recln= MY_MAX(recln, Hc->GetIntegerOption("Lrecl")); + if (trace) + htrc("Lrecl: Calculated=%d defined=%d\n", + recln, Hc->GetIntegerOption("Lrecl")); + + recln = MY_MAX(recln, Hc->GetIntegerOption("Lrecl")); Hc->SetIntegerOption("Lrecl", recln); ((PDOSDEF)this)->SetLrecl(recln); } // endif Lrecl From 7cb79a65ba6286ac66d5ebbebea3243ef97f5c41 Mon Sep 17 00:00:00 2001 From: Arun Kuruvila Date: Wed, 28 Sep 2016 15:52:05 +0530 Subject: [PATCH 04/44] Bug#24707666: DEFAULT SETTING FOR SECURE-FILE-PRIV SHOULD BE RESTRICTED IN ALL GA RELEASES Back port of WL#6782 to 5.5 and 5.6. This also includes back port of Bug#20771331, Bug#20741572 and Bug#20770671. Bug#24695274 and Bug#24679907 are also handled along with this. 
--- cmake/install_layout.cmake | 256 +++++++++++++++++- config.h.cmake | 4 + mysql-test/include/mtr_warnings.sql | 7 +- mysql-test/include/mysqld--help.inc | 3 +- mysql-test/mysql-test-run.pl | 4 +- mysql-test/r/mysqld--help-notwin.result | 1 - mysql-test/r/mysqld--help-win.result | 1 - .../auth_sec/r/secure_file_priv_error.result | 7 + .../auth_sec/r/secure_file_priv_null.result | 21 ++ .../r/secure_file_priv_warnings.result | 17 ++ .../secure_file_priv_warnings_not_win.result | 9 + .../r/secure_file_priv_warnings_win.result | 8 + .../auth_sec/t/secure_file_priv_error.test | 39 +++ .../t/secure_file_priv_null-master.opt | 1 + .../auth_sec/t/secure_file_priv_null.test | 42 +++ .../t/secure_file_priv_warnings-master.opt | 1 + .../auth_sec/t/secure_file_priv_warnings.test | 47 ++++ .../t/secure_file_priv_warnings_not_win.test | 24 ++ .../t/secure_file_priv_warnings_win.test | 35 +++ packaging/rpm-oel/mysql-systemd-start | 6 + packaging/rpm-oel/mysql.init | 10 +- packaging/rpm-oel/mysql.spec.in | 5 + packaging/rpm-sles/mysql.spec.in | 5 + packaging/solaris/postinstall-solaris.sh | 8 +- sql/mysqld.cc | 244 +++++++++++++++-- sql/sql_class.cc | 2 + sql/sql_class.h | 1 + sql/sys_vars.cc | 8 +- support-files/mysql.spec.sh | 7 +- 29 files changed, 786 insertions(+), 37 deletions(-) create mode 100644 mysql-test/suite/auth_sec/r/secure_file_priv_error.result create mode 100644 mysql-test/suite/auth_sec/r/secure_file_priv_null.result create mode 100644 mysql-test/suite/auth_sec/r/secure_file_priv_warnings.result create mode 100644 mysql-test/suite/auth_sec/r/secure_file_priv_warnings_not_win.result create mode 100644 mysql-test/suite/auth_sec/r/secure_file_priv_warnings_win.result create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_error.test create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_null-master.opt create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_null.test create mode 100644 
mysql-test/suite/auth_sec/t/secure_file_priv_warnings-master.opt create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_warnings.test create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_warnings_not_win.test create mode 100644 mysql-test/suite/auth_sec/t/secure_file_priv_warnings_win.test diff --git a/cmake/install_layout.cmake b/cmake/install_layout.cmake index 4adda0b6eacb9..4fd18b049f238 100644 --- a/cmake/install_layout.cmake +++ b/cmake/install_layout.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ # and relative links. Windows zip uses the same tarball layout but without # the build prefix. # -# RPM +# RPM, SLES # Build as per default RPM layout, with prefix=/usr # Note: The layout for ULN RPMs differs, see the "RPM" section. # @@ -32,10 +32,22 @@ # SVR4 # Solaris package layout suitable for pkg* tools, prefix=/opt/mysql/mysql # +# FREEBSD, GLIBC, OSX, TARGZ +# Build with prefix=/usr/local/mysql, create tarball with install prefix="." +# and relative links. +# +# WIN +# Windows zip : same as tarball layout but without the build prefix +# # To force a directory layout, use -DINSTALL_LAYOUT=. # # The default is STANDALONE. # +# Note : At present, RPM and SLES layouts are similar. This is also true +# for layouts like FREEBSD, GLIBC, OSX, TARGZ. However, they provide +# opportunity to fine-tune deployment for each platform without +# affecting all other types of deployment. +# # There is the possibility to further fine-tune installation directories. 
# Several variables can be overwritten: # @@ -60,6 +72,7 @@ # - INSTALL_SUPPORTFILESDIR (various extra support files) # # - INSTALL_MYSQLDATADIR (data directory) +# - INSTALL_SECURE_FILE_PRIVDIR (--secure-file-priv directory) # # When changing this page, _please_ do not forget to update public Wiki # http://forge.mysql.com/wiki/CMake#Fine-tuning_installation_paths @@ -69,10 +82,11 @@ IF(NOT INSTALL_LAYOUT) ENDIF() SET(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}" -CACHE STRING "Installation directory layout. Options are: STANDALONE (as in zip or tar.gz installer), RPM, DEB, SVR4") +CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN (as in zip installer), STANDALONE, RPM, DEB, SVR4, FREEBSD, GLIBC, OSX, SLES") IF(UNIX) - IF(INSTALL_LAYOUT MATCHES "RPM") + IF(INSTALL_LAYOUT MATCHES "RPM" OR + INSTALL_LAYOUT MATCHES "SLES") SET(default_prefix "/usr") ELSEIF(INSTALL_LAYOUT MATCHES "DEB") SET(default_prefix "/opt/mysql/server-${MYSQL_BASE_VERSION}") @@ -87,7 +101,7 @@ IF(UNIX) SET(CMAKE_INSTALL_PREFIX ${default_prefix} CACHE PATH "install prefix" FORCE) ENDIF() - SET(VALID_INSTALL_LAYOUTS "RPM" "STANDALONE" "DEB" "SVR4") + SET(VALID_INSTALL_LAYOUTS "RPM" "DEB" "SVR4" "FREEBSD" "GLIBC" "OSX" "TARGZ" "SLES" "STANDALONE") LIST(FIND VALID_INSTALL_LAYOUTS "${INSTALL_LAYOUT}" ind) IF(ind EQUAL -1) MESSAGE(FATAL_ERROR "Invalid INSTALL_LAYOUT parameter:${INSTALL_LAYOUT}." @@ -99,6 +113,15 @@ IF(UNIX) MARK_AS_ADVANCED(SYSCONFDIR) ENDIF() +IF(WIN32) + SET(VALID_INSTALL_LAYOUTS "TARGZ" "STANDALONE" "WIN") + LIST(FIND VALID_INSTALL_LAYOUTS "${INSTALL_LAYOUT}" ind) + IF(ind EQUAL -1) + MESSAGE(FATAL_ERROR "Invalid INSTALL_LAYOUT parameter:${INSTALL_LAYOUT}." + " Choose between ${VALID_INSTALL_LAYOUTS}" ) + ENDIF() +ENDIF() + # # plugin_tests's value should not be used by imported plugins, # just use if(INSTALL_PLUGINTESTDIR). 
@@ -109,6 +132,22 @@ FILE(GLOB plugin_tests ${CMAKE_SOURCE_DIR}/internal/plugin/*/tests ) +# +# DEFAULT_SECURE_FILE_PRIV_DIR/DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR +# +IF(INSTALL_LAYOUT MATCHES "STANDALONE" OR + INSTALL_LAYOUT MATCHES "WIN") + SET(secure_file_priv_path "NULL") +ELSEIF(INSTALL_LAYOUT MATCHES "RPM" OR + INSTALL_LAYOUT MATCHES "SLES" OR + INSTALL_LAYOUT MATCHES "SVR4" OR + INSTALL_LAYOUT MATCHES "DEB") + SET(secure_file_priv_path "/var/lib/mysql-files") +ELSE() + SET(secure_file_priv_path "${default_prefix}/mysql-files") +ENDIF() +SET(secure_file_priv_embedded_path "NULL") + # # STANDALONE layout # @@ -134,6 +173,148 @@ SET(INSTALL_SUPPORTFILESDIR_STANDALONE "support-files") # SET(INSTALL_MYSQLDATADIR_STANDALONE "data") SET(INSTALL_PLUGINTESTDIR_STANDALONE ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_STANDALONE ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_STANDALONE ${secure_file_priv_embedded_path}) + +# +# WIN layout +# +SET(INSTALL_BINDIR_WIN "bin") +SET(INSTALL_SBINDIR_WIN "bin") +SET(INSTALL_SCRIPTDIR_WIN "scripts") +# +SET(INSTALL_LIBDIR_WIN "lib") +SET(INSTALL_PLUGINDIR_WIN "lib/plugin") +# +SET(INSTALL_INCLUDEDIR_WIN "include") +# +SET(INSTALL_DOCDIR_WIN "docs") +SET(INSTALL_DOCREADMEDIR_WIN ".") +SET(INSTALL_MANDIR_WIN "man") +SET(INSTALL_INFODIR_WIN "docs") +# +SET(INSTALL_SHAREDIR_WIN "share") +SET(INSTALL_MYSQLSHAREDIR_WIN "share") +SET(INSTALL_MYSQLTESTDIR_WIN "mysql-test") +SET(INSTALL_SQLBENCHDIR_WIN ".") +SET(INSTALL_SUPPORTFILESDIR_WIN "support-files") +# +SET(INSTALL_MYSQLDATADIR_WIN "data") +SET(INSTALL_PLUGINTESTDIR_WIN ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_WIN ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_WIN ${secure_file_priv_embedded_path}) + +# +# FREEBSD layout +# +SET(INSTALL_BINDIR_FREEBSD "bin") +SET(INSTALL_SBINDIR_FREEBSD "bin") +SET(INSTALL_SCRIPTDIR_FREEBSD "scripts") +# +SET(INSTALL_LIBDIR_FREEBSD "lib") +SET(INSTALL_PLUGINDIR_FREEBSD "lib/plugin") +# 
+SET(INSTALL_INCLUDEDIR_FREEBSD "include") +# +SET(INSTALL_DOCDIR_FREEBSD "docs") +SET(INSTALL_DOCREADMEDIR_FREEBSD ".") +SET(INSTALL_MANDIR_FREEBSD "man") +SET(INSTALL_INFODIR_FREEBSD "docs") +# +SET(INSTALL_SHAREDIR_FREEBSD "share") +SET(INSTALL_MYSQLSHAREDIR_FREEBSD "share") +SET(INSTALL_MYSQLTESTDIR_FREEBSD "mysql-test") +SET(INSTALL_SQLBENCHDIR_FREEBSD ".") +SET(INSTALL_SUPPORTFILESDIR_FREEBSD "support-files") +# +SET(INSTALL_MYSQLDATADIR_FREEBSD "data") +SET(INSTALL_PLUGINTESTDIR_FREEBSD ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_FREEBSD ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_FREEBSD ${secure_file_priv_embedded_path}) + +# +# GLIBC layout +# +SET(INSTALL_BINDIR_GLIBC "bin") +SET(INSTALL_SBINDIR_GLIBC "bin") +SET(INSTALL_SCRIPTDIR_GLIBC "scripts") +# +SET(INSTALL_LIBDIR_GLIBC "lib") +SET(INSTALL_PLUGINDIR_GLIBC "lib/plugin") +# +SET(INSTALL_INCLUDEDIR_GLIBC "include") +# +SET(INSTALL_DOCDIR_GLIBC "docs") +SET(INSTALL_DOCREADMEDIR_GLIBC ".") +SET(INSTALL_MANDIR_GLIBC "man") +SET(INSTALL_INFODIR_GLIBC "docs") +# +SET(INSTALL_SHAREDIR_GLIBC "share") +SET(INSTALL_MYSQLSHAREDIR_GLIBC "share") +SET(INSTALL_MYSQLTESTDIR_GLIBC "mysql-test") +SET(INSTALL_SQLBENCHDIR_GLIBC ".") +SET(INSTALL_SUPPORTFILESDIR_GLIBC "support-files") +# +SET(INSTALL_MYSQLDATADIR_GLIBC "data") +SET(INSTALL_PLUGINTESTDIR_GLIBC ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_GLIBC ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_GLIBC ${secure_file_priv_embedded_path}) + +# +# OSX layout +# +SET(INSTALL_BINDIR_OSX "bin") +SET(INSTALL_SBINDIR_OSX "bin") +SET(INSTALL_SCRIPTDIR_OSX "scripts") +# +SET(INSTALL_LIBDIR_OSX "lib") +SET(INSTALL_PLUGINDIR_OSX "lib/plugin") +# +SET(INSTALL_INCLUDEDIR_OSX "include") +# +SET(INSTALL_DOCDIR_OSX "docs") +SET(INSTALL_DOCREADMEDIR_OSX ".") +SET(INSTALL_MANDIR_OSX "man") +SET(INSTALL_INFODIR_OSX "docs") +# +SET(INSTALL_SHAREDIR_OSX "share") +SET(INSTALL_MYSQLSHAREDIR_OSX "share") 
+SET(INSTALL_MYSQLTESTDIR_OSX "mysql-test") +SET(INSTALL_SQLBENCHDIR_OSX ".") +SET(INSTALL_SUPPORTFILESDIR_OSX "support-files") +# +SET(INSTALL_MYSQLDATADIR_OSX "data") +SET(INSTALL_PLUGINTESTDIR_OSX ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_OSX ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_OSX ${secure_file_priv_embedded_path}) + +# +# TARGZ layout +# +SET(INSTALL_BINDIR_TARGZ "bin") +SET(INSTALL_SBINDIR_TARGZ "bin") +SET(INSTALL_SCRIPTDIR_TARGZ "scripts") +# +SET(INSTALL_LIBDIR_TARGZ "lib") +SET(INSTALL_PLUGINDIR_TARGZ "lib/plugin") +# +SET(INSTALL_INCLUDEDIR_TARGZ "include") +# +SET(INSTALL_DOCDIR_TARGZ "docs") +SET(INSTALL_DOCREADMEDIR_TARGZ ".") +SET(INSTALL_MANDIR_TARGZ "man") +SET(INSTALL_INFODIR_TARGZ "docs") +# +SET(INSTALL_SHAREDIR_TARGZ "share") +SET(INSTALL_MYSQLSHAREDIR_TARGZ "share") +SET(INSTALL_MYSQLTESTDIR_TARGZ "mysql-test") +SET(INSTALL_SQLBENCHDIR_TARGZ ".") +SET(INSTALL_SUPPORTFILESDIR_TARGZ "support-files") +# +SET(INSTALL_MYSQLDATADIR_TARGZ "data") +SET(INSTALL_PLUGINTESTDIR_TARGZ ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_TARGZ ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_TARGZ ${secure_file_priv_embedded_path}) # # RPM layout @@ -169,6 +350,41 @@ SET(INSTALL_SUPPORTFILESDIR_RPM "share/mysql") # SET(INSTALL_MYSQLDATADIR_RPM "/var/lib/mysql") SET(INSTALL_PLUGINTESTDIR_RPM ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_RPM ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_RPM ${secure_file_priv_embedded_path}) + +# +# SLES layout +# +SET(INSTALL_BINDIR_SLES "bin") +SET(INSTALL_SBINDIR_SLES "sbin") +SET(INSTALL_SCRIPTDIR_SLES "bin") +# +IF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + SET(INSTALL_LIBDIR_SLES "lib64") + SET(INSTALL_PLUGINDIR_SLES "lib64/mysql/plugin") +ELSE() + SET(INSTALL_LIBDIR_SLES "lib") + SET(INSTALL_PLUGINDIR_SLES "lib/mysql/plugin") +ENDIF() +# +SET(INSTALL_INCLUDEDIR_SLES "include/mysql") +# +#SET(INSTALL_DOCDIR_SLES unset - installed directly 
by SLES) +#SET(INSTALL_DOCREADMEDIR_SLES unset - installed directly by SLES) +SET(INSTALL_INFODIR_SLES "share/info") +SET(INSTALL_MANDIR_SLES "share/man") +# +SET(INSTALL_SHAREDIR_SLES "share") +SET(INSTALL_MYSQLSHAREDIR_SLES "share/mysql") +SET(INSTALL_MYSQLTESTDIR_SLES "share/mysql-test") +SET(INSTALL_SQLBENCHDIR_SLES "") +SET(INSTALL_SUPPORTFILESDIR_SLES "share/mysql") +# +SET(INSTALL_MYSQLDATADIR_SLES "/var/lib/mysql") +SET(INSTALL_PLUGINTESTDIR_SLES ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_SLES ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_SLES ${secure_file_priv_embedded_path}) # # DEB layout @@ -193,8 +409,10 @@ SET(INSTALL_MYSQLTESTDIR_DEB "mysql-test") SET(INSTALL_SQLBENCHDIR_DEB ".") SET(INSTALL_SUPPORTFILESDIR_DEB "support-files") # -SET(INSTALL_MYSQLDATADIR_DEB "data") +SET(INSTALL_MYSQLDATADIR_DEB "/var/lib/mysql") SET(INSTALL_PLUGINTESTDIR_DEB ${plugin_tests}) +SET(INSTALL_SECURE_FILE_PRIVDIR_DEB ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_DEB ${secure_file_priv_embedded_path}) # # SVR4 layout @@ -221,7 +439,8 @@ SET(INSTALL_SUPPORTFILESDIR_SVR4 "support-files") # SET(INSTALL_MYSQLDATADIR_SVR4 "/var/lib/mysql") SET(INSTALL_PLUGINTESTDIR_SVR4 ${plugin_tests}) - +SET(INSTALL_SECURE_FILE_PRIVDIR_SVR4 ${secure_file_priv_path}) +SET(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR_SVR4 ${secure_file_priv_embedded_path}) # Clear cached variables if install layout was changed IF(OLD_INSTALL_LAYOUT) @@ -235,8 +454,29 @@ SET(OLD_INSTALL_LAYOUT ${INSTALL_LAYOUT} CACHE INTERNAL "") # will be defined as ${INSTALL_BINDIR_STANDALONE} by default if STANDALONE # layout is chosen) FOREACH(var BIN SBIN LIB MYSQLSHARE SHARE PLUGIN INCLUDE SCRIPT DOC MAN - INFO MYSQLTEST SQLBENCH DOCREADME SUPPORTFILES MYSQLDATA PLUGINTEST) + INFO MYSQLTEST SQLBENCH DOCREADME SUPPORTFILES MYSQLDATA PLUGINTEST + SECURE_FILE_PRIV SECURE_FILE_PRIV_EMBEDDED) SET(INSTALL_${var}DIR ${INSTALL_${var}DIR_${INSTALL_LAYOUT}} CACHE STRING "${var} 
installation directory" ${FORCE}) MARK_AS_ADVANCED(INSTALL_${var}DIR) ENDFOREACH() + +# +# Set DEFAULT_SECURE_FILE_PRIV_DIR +# This is used as default value for --secure-file-priv +# +IF(INSTALL_SECURE_FILE_PRIVDIR) + SET(DEFAULT_SECURE_FILE_PRIV_DIR "\"${INSTALL_SECURE_FILE_PRIVDIR}\"" + CACHE INTERNAL "default --secure-file-priv directory" FORCE) +ELSE() + SET(DEFAULT_SECURE_FILE_PRIV_DIR \"\" + CACHE INTERNAL "default --secure-file-priv directory" FORCE) +ENDIF() + +IF(INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR) + SET(DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR "\"${INSTALL_SECURE_FILE_PRIV_EMBEDDEDDIR}\"" + CACHE INTERNAL "default --secure-file-priv directory (for embedded library)" FORCE) +ELSE() + SET(DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR "NULL" + CACHE INTERNAL "default --secure-file-priv directory (for embedded library)" FORCE) +ENDIF() diff --git a/config.h.cmake b/config.h.cmake index 4548d0a221f24..c7ed127379a32 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -624,4 +624,8 @@ #cmakedefine SIZEOF_TIME_T @SIZEOF_TIME_T@ #cmakedefine TIME_T_UNSIGNED @TIME_T_UNSIGNED@ +/* For --secure-file-priv */ +#cmakedefine DEFAULT_SECURE_FILE_PRIV_DIR @DEFAULT_SECURE_FILE_PRIV_DIR@ +#cmakedefine DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR @DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR@ + #endif diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index 45acbc03b7e84..0a3c3bc60b3e6 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -1,4 +1,4 @@ --- Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +-- Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. 
-- -- This program is free software; you can redistribute it and/or modify -- it under the terms of the GNU General Public License as published by @@ -204,6 +204,11 @@ INSERT INTO global_suppressions VALUES */ ("Found lock of type 6 that is write and read locked"), + /* + Warnings related to --secure-file-priv + */ + ("Insecure configuration for --secure-file-priv:*"), + ("THE_LAST_SUPPRESSION")|| diff --git a/mysql-test/include/mysqld--help.inc b/mysql-test/include/mysqld--help.inc index 380a7f6c8cfa8..7fa57abbe1ed5 100644 --- a/mysql-test/include/mysqld--help.inc +++ b/mysql-test/include/mysqld--help.inc @@ -18,7 +18,8 @@ perl; # their paths may vary: @skipvars=qw/basedir open-files-limit general-log-file log plugin-dir log-slow-queries pid-file slow-query-log-file - datadir slave-load-tmpdir tmpdir socket/; + datadir slave-load-tmpdir tmpdir socket + secure-file-priv/; # Plugins which may or may not be there: @plugins=qw/innodb ndb archive blackhole federated partition ndbcluster debug temp-pool ssl des-key-file diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 684d262f41094..3eb70c1bdb9b4 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # -*- cperl -*- -# Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -1823,6 +1823,7 @@ sub collect_mysqld_features { mtr_init_args(\$args); mtr_add_arg($args, "--no-defaults"); mtr_add_arg($args, "--datadir=%s", mixed_path($tmpdir)); + mtr_add_arg($args, "--secure-file-priv=\"\""); mtr_add_arg($args, "--lc-messages-dir=%s", $path_language); mtr_add_arg($args, "--skip-grant-tables"); mtr_add_arg($args, "--verbose"); @@ -3297,6 +3298,7 @@ sub mysql_install_db { mtr_add_arg($args, "--loose-skip-falcon"); mtr_add_arg($args, "--loose-skip-ndbcluster"); mtr_add_arg($args, "--tmpdir=%s", "$opt_vardir/tmp/"); + mtr_add_arg($args, "--secure-file-priv=%s", "$opt_vardir"); mtr_add_arg($args, "--core-file"); if ( $opt_debug ) diff --git a/mysql-test/r/mysqld--help-notwin.result b/mysql-test/r/mysqld--help-notwin.result index d527d6cb70279..78dc9ab4d880a 100644 --- a/mysql-test/r/mysqld--help-notwin.result +++ b/mysql-test/r/mysqld--help-notwin.result @@ -923,7 +923,6 @@ report-user (No default value) rpl-recovery-rank 0 safe-user-create FALSE secure-auth FALSE -secure-file-priv (No default value) server-id 0 show-slave-auth-info FALSE skip-grant-tables TRUE diff --git a/mysql-test/r/mysqld--help-win.result b/mysql-test/r/mysqld--help-win.result index 2ce9e763b14ad..1d56da7aa5e6e 100644 --- a/mysql-test/r/mysqld--help-win.result +++ b/mysql-test/r/mysqld--help-win.result @@ -931,7 +931,6 @@ report-user (No default value) rpl-recovery-rank 0 safe-user-create FALSE secure-auth FALSE -secure-file-priv (No default value) server-id 0 shared-memory FALSE shared-memory-base-name MYSQL diff --git a/mysql-test/suite/auth_sec/r/secure_file_priv_error.result b/mysql-test/suite/auth_sec/r/secure_file_priv_error.result new file mode 100644 index 0000000000000..4bb4d87c5f068 --- /dev/null +++ b/mysql-test/suite/auth_sec/r/secure_file_priv_error.result @@ -0,0 +1,7 @@ 
+#----------------------------------------------------------------------- +# Setup +# Try to restart server with invalid value for --secure-file-priv +# Search for : Failed to access directory for --secure-file-priv. +# Restart completed. +# Restart +#----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/r/secure_file_priv_null.result b/mysql-test/suite/auth_sec/r/secure_file_priv_null.result new file mode 100644 index 0000000000000..e2a5102c627e3 --- /dev/null +++ b/mysql-test/suite/auth_sec/r/secure_file_priv_null.result @@ -0,0 +1,21 @@ +#----------------------------------------------------------------------- +# Setup +#----------------------------------------------------------------------- +# Search for : --secure-file-priv is set to NULL. Operations +# related to importing and exporting data are +# disabled +show variables like 'secure_file_priv'; +Variable_name Value +secure_file_priv null +use test; +drop table if exists secure_file_priv_test_null; +create table secure_file_priv_test_null(c1 int); +insert into secure_file_priv_test_null values (1), (2), (3), (4); +select * from secure_file_priv_test_null into outfile 'blah'; +ERROR HY000: The MySQL server is running with the --secure-file-priv option so it cannot execute this statement +select * from secure_file_priv_test_null into outfile 'null/blah'; +ERROR HY000: The MySQL server is running with the --secure-file-priv option so it cannot execute this statement +drop table secure_file_priv_test_null; +#----------------------------------------------------------------------- +# Clean-up +#----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/r/secure_file_priv_warnings.result b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings.result new file mode 100644 index 0000000000000..3b80cbe8d6fcf --- /dev/null +++ b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings.result @@ -0,0 +1,17 @@ 
+#----------------------------------------------------------------------- +# Setup +#----------------------------------------------------------------------- +# Search for : Insecure configuration for --secure-file-priv: Current +# value does not restrict location of generated files. +# Consider setting it to a valid, non-empty path. +SHOW VARIABLES LIKE 'secure_file_priv'; +Variable_name Value +secure_file_priv +#----------------------------------------------------------------------- +# Restart completed. +# Search for : Insecure configuration for --secure-file-priv: Plugin +# directory is accessible through --secure-file-priv. +# Consider choosing a different directory. +#----------------------------------------------------------------------- +# Clean-up +#----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_not_win.result b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_not_win.result new file mode 100644 index 0000000000000..84e2f8ac3c214 --- /dev/null +++ b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_not_win.result @@ -0,0 +1,9 @@ +#----------------------------------------------------------------------- +# Search for : Insecure configuration for --secure-file-priv: Data +# directory is accessible through --secure-file-priv. +# Consider choosing a different directory. +#----------------------------------------------------------------------- +# Search for : Insecure configuration for --secure-file-priv: Location +# is accessible to all OS users. Consider choosing a +# different directory. 
+#----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_win.result b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_win.result new file mode 100644 index 0000000000000..3beff6c4747fe --- /dev/null +++ b/mysql-test/suite/auth_sec/r/secure_file_priv_warnings_win.result @@ -0,0 +1,8 @@ +#----------------------------------------------------------------------- +# Test 2 : Restarting mysqld with : +# --secure-file-priv=MYSQLTEST_VARDIR/mysqld.1/Data +# Restart completed. +# Search for : Insecure configuration for --secure-file-priv: Data +# directory is accessible through --secure-file-priv. +# Consider choosing a different directory. +#----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_error.test b/mysql-test/suite/auth_sec/t/secure_file_priv_error.test new file mode 100644 index 0000000000000..9f8d185d8f51c --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_error.test @@ -0,0 +1,39 @@ +--source include/no_valgrind_without_big.inc +--source include/not_embedded.inc + +--echo #----------------------------------------------------------------------- +--echo # Setup +let restart_log= $MYSQLTEST_VARDIR/log/my_restart.err; +let SEARCH_FILE= $restart_log; +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; + +--echo # Try to restart server with invalid value for --secure-file-priv +--exec echo "wait" > $restart_file +--shutdown_server +--source include/wait_until_disconnected.inc + +--error 0,1 +--remove_file $restart_log +# Following should fail +--error 1 +--exec $MYSQLD_CMD --secure-file-priv=blahblahblah --loose-console > $restart_log 2>&1 + +--echo # Search for : Failed to access directory for --secure-file-priv. 
+let SEARCH_PATTERN= Failed to access directory for --secure-file-priv; +--source include/search_pattern_in_file.inc + +--remove_file $restart_log + +--source include/wait_until_disconnected.inc +# Dummy argument for restart +--exec echo "restart:" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect +--echo # Restart completed. + +--echo # Restart +--disable_warnings +--source include/force_restart.inc +--enable_warnings +--echo #----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_null-master.opt b/mysql-test/suite/auth_sec/t/secure_file_priv_null-master.opt new file mode 100644 index 0000000000000..80d7f3cd46905 --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_null-master.opt @@ -0,0 +1 @@ +--secure-file-priv=null diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_null.test b/mysql-test/suite/auth_sec/t/secure_file_priv_null.test new file mode 100644 index 0000000000000..8d394a135895a --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_null.test @@ -0,0 +1,42 @@ +--source include/no_valgrind_without_big.inc +--source include/not_embedded.inc + +--echo #----------------------------------------------------------------------- +--echo # Setup +let server_log= $MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_FILE= $server_log; +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--echo #----------------------------------------------------------------------- + +--echo # Search for : --secure-file-priv is set to NULL. Operations +--echo # related to importing and exporting data are +--echo # disabled +let SEARCH_PATTERN= --secure-file-priv is set to NULL. 
Operations related to importing and exporting data are disabled; +--source include/search_pattern_in_file.inc + +connect(test4_con,localhost,root,,,,,); +show variables like 'secure_file_priv'; + +use test; +--disable_warnings +drop table if exists secure_file_priv_test_null; +--enable_warnings +create table secure_file_priv_test_null(c1 int); +insert into secure_file_priv_test_null values (1), (2), (3), (4); +--error 1290 +select * from secure_file_priv_test_null into outfile 'blah'; +--error 1290 +select * from secure_file_priv_test_null into outfile 'null/blah'; +drop table secure_file_priv_test_null; + +connection default; +disconnect test4_con; + +--echo #----------------------------------------------------------------------- + +--echo # Clean-up +--disable_warnings +--source include/force_restart.inc +--enable_warnings + +--echo #----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_warnings-master.opt b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings-master.opt new file mode 100644 index 0000000000000..22520f0aa9901 --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings-master.opt @@ -0,0 +1 @@ +--secure-file-priv="" diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_warnings.test b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings.test new file mode 100644 index 0000000000000..cc7a79d5b3c18 --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings.test @@ -0,0 +1,47 @@ +--source include/no_valgrind_without_big.inc +--source include/not_embedded.inc + +--echo #----------------------------------------------------------------------- +--echo # Setup +let server_log= $MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_FILE= $server_log; +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +let PLUGIN_DIR= $MYSQLTEST_VARDIR/tmp; +--echo #----------------------------------------------------------------------- + +--echo # Search for : 
Insecure configuration for --secure-file-priv: Current +--echo # value does not restrict location of generated files. +--echo # Consider setting it to a valid, non-empty path. +let SEARCH_PATTERN= Insecure configuration for --secure-file-priv: Current value does not restrict location of generated files. Consider setting it to a valid, non-empty path.; +--source include/search_pattern_in_file.inc + +# Must show empty string +SHOW VARIABLES LIKE 'secure_file_priv'; + +--echo #----------------------------------------------------------------------- + +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server +--source include/wait_until_disconnected.inc +--remove_file $server_log +--exec echo "restart:--plugin-dir=$PLUGIN_DIR --secure-file-priv=$PLUGIN_DIR" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect +--echo # Restart completed. + +--echo # Search for : Insecure configuration for --secure-file-priv: Plugin +--echo # directory is accessible through --secure-file-priv. +--echo # Consider choosing a different directory. +let SEARCH_PATTERN= Insecure configuration for --secure-file-priv: Plugin directory is accessible through --secure-file-priv. 
Consider choosing a different directory.; +--source include/search_pattern_in_file.inc + +--echo #----------------------------------------------------------------------- + +--echo # Clean-up +--disable_warnings +--source include/force_restart.inc +--enable_warnings + +--echo #----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_not_win.test b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_not_win.test new file mode 100644 index 0000000000000..ec027d4a743f5 --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_not_win.test @@ -0,0 +1,24 @@ +--source include/no_valgrind_without_big.inc +--source include/not_windows.inc +--source include/not_embedded.inc + +let server_log= $MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_FILE= $server_log; + +--echo #----------------------------------------------------------------------- + +--echo # Search for : Insecure configuration for --secure-file-priv: Data +--echo # directory is accessible through --secure-file-priv. +--echo # Consider choosing a different directory. +let SEARCH_PATTERN= Insecure configuration for --secure-file-priv: Data directory is accessible through --secure-file-priv. Consider choosing a different directory.; +--source include/search_pattern_in_file.inc + +--echo #----------------------------------------------------------------------- + +--echo # Search for : Insecure configuration for --secure-file-priv: Location +--echo # is accessible to all OS users. Consider choosing a +--echo # different directory. +let SEARCH_PATTERN= Insecure configuration for --secure-file-priv: Location is accessible to all OS users. 
Consider choosing a different directory.; +--source include/search_pattern_in_file.inc + +--echo #----------------------------------------------------------------------- diff --git a/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_win.test b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_win.test new file mode 100644 index 0000000000000..bb175fb40ea3f --- /dev/null +++ b/mysql-test/suite/auth_sec/t/secure_file_priv_warnings_win.test @@ -0,0 +1,35 @@ +--source include/no_valgrind_without_big.inc +--source include/windows.inc +--source include/not_embedded.inc + +let server_log= $MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_FILE= $server_log; + +--echo #----------------------------------------------------------------------- + +--echo # Test 2 : Restarting mysqld with : +--echo # --secure-file-priv=MYSQLTEST_VARDIR/mysqld.1/Data + +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server +--source include/wait_until_disconnected.inc +--error 0,1 +--remove_file $server_log +--exec echo "restart: --secure-file-priv=$MYSQLTEST_VARDIR/mysqld.1/Data" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect +--echo # Restart completed. + +--echo # Search for : Insecure configuration for --secure-file-priv: Data +--echo # directory is accessible through --secure-file-priv. +--echo # Consider choosing a different directory. +let SEARCH_PATTERN= Insecure configuration for --secure-file-priv: Data directory is accessible through --secure-file-priv. 
Consider choosing a different directory.; +--source include/search_pattern_in_file.inc + +--disable_warnings +--source include/force_restart.inc +--enable_warnings + +--echo #----------------------------------------------------------------------- diff --git a/packaging/rpm-oel/mysql-systemd-start b/packaging/rpm-oel/mysql-systemd-start index fab7b3627b31a..231a76087ac3e 100644 --- a/packaging/rpm-oel/mysql-systemd-start +++ b/packaging/rpm-oel/mysql-systemd-start @@ -30,6 +30,12 @@ install_db () { if [ -x /usr/sbin/restorecon ]; then /usr/sbin/restorecon "$datadir" /usr/sbin/restorecon $log + for dir in /var/lib/mysql-files ; do + if [ -x /usr/sbin/semanage -a -d /var/lib/mysql -a -d $dir ] ; then + /usr/sbin/semanage fcontext -a -e /var/lib/mysql $dir >/dev/null 2>&1 + /sbin/restorecon $dir + fi + done fi # If special mysql dir is in place, skip db install diff --git a/packaging/rpm-oel/mysql.init b/packaging/rpm-oel/mysql.init index aaea498d15339..75ae672801b77 100644 --- a/packaging/rpm-oel/mysql.init +++ b/packaging/rpm-oel/mysql.init @@ -82,7 +82,15 @@ start(){ fi chown mysql:mysql "$datadir" chmod 0755 "$datadir" - [ -x /sbin/restorecon ] && /sbin/restorecon "$datadir" + if [ -x /sbin/restorecon ]; then + /sbin/restorecon "$datadir" + for dir in /var/lib/mysql-files ; do + if [ -x /usr/sbin/semanage -a -d /var/lib/mysql -a -d $dir ] ; then + /usr/sbin/semanage fcontext -a -e /var/lib/mysql $dir >/dev/null 2>&1 + /sbin/restorecon $dir + fi + done + fi # Now create the database action $"Initializing MySQL database: " /usr/bin/mysql_install_db --rpm --datadir="$datadir" --user=mysql ret=$? 
diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in index 409c325b6759f..7ef294ffa8492 100644 --- a/packaging/rpm-oel/mysql.spec.in +++ b/packaging/rpm-oel/mysql.spec.in @@ -560,6 +560,7 @@ MBD=$RPM_BUILD_DIR/%{src_dir} install -d -m 0755 %{buildroot}%{_datadir}/mysql/SELinux/RHEL4 install -d -m 0755 %{buildroot}/var/lib/mysql install -d -m 0755 %{buildroot}/var/run/mysqld +install -d -m 0750 %{buildroot}/var/lib/mysql-files # Install all binaries cd $MBD/release @@ -790,6 +791,7 @@ fi %attr(644, root, root) %config(noreplace,missingok) %{_sysconfdir}/logrotate.d/mysql %dir %attr(755, mysql, mysql) /var/lib/mysql %dir %attr(755, mysql, mysql) /var/run/mysqld +%dir %attr(750, mysql, mysql) /var/lib/mysql-files %files common %defattr(-, root, root, -) @@ -916,6 +918,9 @@ fi %endif %changelog +* Mon Sep 26 2016 Balasubramanian Kandasamy - 5.5.53-1 +- Include mysql-files directory + * Tue Jul 05 2016 Balasubramanian Kandasamy - 5.5.51-1 - Remove mysql_config from client subpackage diff --git a/packaging/rpm-sles/mysql.spec.in b/packaging/rpm-sles/mysql.spec.in index a11dfff7b70e0..6652cdcccb614 100644 --- a/packaging/rpm-sles/mysql.spec.in +++ b/packaging/rpm-sles/mysql.spec.in @@ -425,6 +425,7 @@ MBD=$RPM_BUILD_DIR/%{src_dir} install -d -m 0755 %{buildroot}/var/lib/mysql install -d -m 0755 %{buildroot}/var/run/mysql install -d -m 0750 %{buildroot}/var/log/mysql +install -d -m 0750 %{buildroot}/var/lib/mysql-files # Install all binaries cd $MBD/release @@ -638,6 +639,7 @@ fi %dir %attr(755, mysql, mysql) /var/lib/mysql %dir %attr(755, mysql, mysql) /var/run/mysql %dir %attr(750, mysql, mysql) /var/log/mysql +%dir %attr(750, mysql, mysql) /var/lib/mysql-files %files common %defattr(-, root, root, -) @@ -783,6 +785,9 @@ fi %attr(755, root, root) %{_libdir}/mysql/libmysqld.so %changelog +* Mon Sep 26 2016 Balasubramanian Kandasamy - 5.5.53-1 +- Include mysql-files directory + * Tue Sep 29 2015 Balasubramanian Kandasamy - 5.5.47-1 - Added 
conflicts to mysql-connector-c-shared dependencies diff --git a/packaging/solaris/postinstall-solaris.sh b/packaging/solaris/postinstall-solaris.sh index b024d94f15854..a31e151e1bb32 100644 --- a/packaging/solaris/postinstall-solaris.sh +++ b/packaging/solaris/postinstall-solaris.sh @@ -1,6 +1,6 @@ #!/bin/sh # -# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,6 +26,7 @@ mygroup=mysql myuser=mysql mydatadir=/var/lib/mysql basedir=@@basedir@@ +mysecurefiledir=/var/lib/mysql-files if [ -n "$BASEDIR" ] ; then basedir="$BASEDIR" @@ -58,6 +59,11 @@ fi chown -R $myuser:$mygroup $mydatadir +# Create securefile directory +[ -d "$mysecurefiledir" ] || mkdir -p -m 770 "$mysecurefiledir" || exit 1 +chown -R $myuser:$mygroup $mysecurefiledir + + # Solaris patch 119255 (somewhere around revision 42) changes the behaviour # of pkgadd to set TMPDIR internally to a root-owned install directory. 
This # has the unfortunate side effect of breaking running mysql_install_db with diff --git a/sql/mysqld.cc b/sql/mysqld.cc index e979ea1b731a9..2429db0774b4a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -570,6 +570,7 @@ uint mysql_real_data_home_len, mysql_data_home_len= 1; uint reg_ext_length; const key_map key_map_empty(0); key_map key_map_full(0); // Will be initialized later +char secure_file_real_path[FN_REFLEN]; DATE_TIME_FORMAT global_date_format, global_datetime_format, global_time_format; Time_zone *default_tz; @@ -7598,9 +7599,9 @@ bool is_secure_file_path(char *path) char buff1[FN_REFLEN], buff2[FN_REFLEN]; size_t opt_secure_file_priv_len; /* - All paths are secure if opt_secure_file_path is 0 + All paths are secure if opt_secure_file_priv is 0 */ - if (!opt_secure_file_priv) + if (!opt_secure_file_priv[0]) return TRUE; opt_secure_file_priv_len= strlen(opt_secure_file_priv); @@ -7608,6 +7609,9 @@ bool is_secure_file_path(char *path) if (strlen(path) >= FN_REFLEN) return FALSE; + if (!my_strcasecmp(system_charset_info, opt_secure_file_priv, "NULL")) + return FALSE; + if (my_realpath(buff1, path, 0)) { /* @@ -7640,9 +7644,184 @@ bool is_secure_file_path(char *path) } +/** + check_secure_file_priv_path : Checks path specified through + --secure-file-priv and raises warning in following cases: + 1. If path is empty string or NULL and mysqld is not running + with --bootstrap mode. + 2. If path can access data directory + 3. If path points to a directory which is accessible by + all OS users (non-Windows build only) + + It throws error in following cases: + + 1. If path normalization fails + 2. If it can not get stats of the directory + + @params NONE + + Assumptions : + 1. Data directory path has been normalized + 2. opt_secure_file_priv has been normalized unless it is set + to "NULL". + + @returns Status of validation + @retval true : Validation is successful with/without warnings + @retval false : Validation failed. Error is raised. 
+*/ + +bool check_secure_file_priv_path() +{ + char datadir_buffer[FN_REFLEN+1]={0}; + char plugindir_buffer[FN_REFLEN+1]={0}; + char whichdir[20]= {0}; + size_t opt_plugindir_len= 0; + size_t opt_datadir_len= 0; + size_t opt_secure_file_priv_len= 0; + bool warn= false; + bool case_insensitive_fs; +#ifndef _WIN32 + MY_STAT dir_stat; +#endif + + if (!opt_secure_file_priv[0]) + { + if (opt_bootstrap) + { + /* + Do not impose --secure-file-priv restriction + in --bootstrap mode + */ + sql_print_information("Ignoring --secure-file-priv value as server is " + "running with --bootstrap."); + } + else + { + sql_print_warning("Insecure configuration for --secure-file-priv: " + "Current value does not restrict location of generated " + "files. Consider setting it to a valid, " + "non-empty path."); + } + return true; + } + + /* + Setting --secure-file-priv to NULL would disable + reading/writing from/to file + */ + if(!my_strcasecmp(system_charset_info, opt_secure_file_priv, "NULL")) + { + sql_print_information("--secure-file-priv is set to NULL. " + "Operations related to importing and exporting " + "data are disabled"); + return true; + } + + /* + Check if --secure-file-priv can access data directory + */ + opt_secure_file_priv_len= strlen(opt_secure_file_priv); + + /* + Adds dir seperator at the end. + This is required in subsequent comparison + */ + convert_dirname(datadir_buffer, mysql_unpacked_real_data_home, NullS); + opt_datadir_len= strlen(datadir_buffer); + + case_insensitive_fs= + (test_if_case_insensitive(datadir_buffer) == 1); + + if (!case_insensitive_fs) + { + if (!strncmp(datadir_buffer, opt_secure_file_priv, + opt_datadir_len < opt_secure_file_priv_len ? 
+ opt_datadir_len : opt_secure_file_priv_len)) + { + warn= true; + strcpy(whichdir, "Data directory"); + } + } + else + { + if (!files_charset_info->coll->strnncoll(files_charset_info, + (uchar *) datadir_buffer, + opt_datadir_len, + (uchar *) opt_secure_file_priv, + opt_secure_file_priv_len, + TRUE)) + { + warn= true; + strcpy(whichdir, "Data directory"); + } + } + + /* + Don't bother comparing --secure-file-priv with --plugin-dir + if we already have a match against --datadir or + --plugin-dir is not pointing to a valid directory. + */ + if (!warn && !my_realpath(plugindir_buffer, opt_plugin_dir, 0)) + { + convert_dirname(plugindir_buffer, plugindir_buffer, NullS); + opt_plugindir_len= strlen(plugindir_buffer); + + if (!case_insensitive_fs) + { + if (!strncmp(plugindir_buffer, opt_secure_file_priv, + opt_plugindir_len < opt_secure_file_priv_len ? + opt_plugindir_len : opt_secure_file_priv_len)) + { + warn= true; + strcpy(whichdir, "Plugin directory"); + } + } + else + { + if (!files_charset_info->coll->strnncoll(files_charset_info, + (uchar *) plugindir_buffer, + opt_plugindir_len, + (uchar *) opt_secure_file_priv, + opt_secure_file_priv_len, + TRUE)) + { + warn= true; + strcpy(whichdir, "Plugin directory"); + } + } + } + + + if (warn) + sql_print_warning("Insecure configuration for --secure-file-priv: " + "%s is accessible through " + "--secure-file-priv. Consider choosing a different " + "directory.", whichdir); + +#ifndef _WIN32 + /* + Check for --secure-file-priv directory's permission + */ + if (!(my_stat(opt_secure_file_priv, &dir_stat, MYF(0)))) + { + sql_print_error("Failed to get stat for directory pointed out " + "by --secure-file-priv"); + return false; + } + + if (dir_stat.st_mode & S_IRWXO) + sql_print_warning("Insecure configuration for --secure-file-priv: " + "Location is accessible to all OS users. 
" + "Consider choosing a different directory."); +#endif + return true; +} + + static int fix_paths(void) { char buff[FN_REFLEN],*pos; + bool secure_file_priv_nonempty= false; convert_dirname(mysql_home,mysql_home,NullS); /* Resolve symlinks to allow 'mysql_home' to be a relative symlink */ my_realpath(mysql_home,mysql_home,MYF(0)); @@ -7700,29 +7879,56 @@ static int fix_paths(void) Convert the secure-file-priv option to system format, allowing a quick strcmp to check if read or write is in an allowed dir */ - if (opt_secure_file_priv) + if (opt_bootstrap) + opt_secure_file_priv= EMPTY_STR.str; + secure_file_priv_nonempty= opt_secure_file_priv[0] ? true : false; + + if (secure_file_priv_nonempty && strlen(opt_secure_file_priv) > FN_REFLEN) { - if (*opt_secure_file_priv == 0) - { - my_free(opt_secure_file_priv); - opt_secure_file_priv= 0; - } - else + sql_print_warning("Value for --secure-file-priv is longer than maximum " + "limit of %d", FN_REFLEN-1); + return 1; + } + + memset(buff, 0, sizeof(buff)); + if (secure_file_priv_nonempty && + my_strcasecmp(system_charset_info, opt_secure_file_priv, "NULL")) + { + int retval= my_realpath(buff, opt_secure_file_priv, MYF(MY_WME)); + if (!retval) { - if (strlen(opt_secure_file_priv) >= FN_REFLEN) - opt_secure_file_priv[FN_REFLEN-1]= '\0'; - if (my_realpath(buff, opt_secure_file_priv, 0)) + convert_dirname(secure_file_real_path, buff, NullS); +#ifdef WIN32 + MY_DIR *dir= my_dir(secure_file_real_path, MYF(MY_DONT_SORT+MY_WME)); + if (!dir) { - sql_print_warning("Failed to normalize the argument for --secure-file-priv."); - return 1; + retval= 1; } - char *secure_file_real_path= (char *)my_malloc(FN_REFLEN, MYF(MY_FAE)); - convert_dirname(secure_file_real_path, buff, NullS); - my_free(opt_secure_file_priv); - opt_secure_file_priv= secure_file_real_path; + else + { + my_dirend(dir); + } +#endif + } + + if (retval) + { + char err_buffer[FN_REFLEN]; + my_snprintf(err_buffer, FN_REFLEN-1, + "Failed to access directory for 
--secure-file-priv." + " Please make sure that directory exists and is " + "accessible by MySQL Server. Supplied value : %s", + opt_secure_file_priv); + err_buffer[FN_REFLEN-1]='\0'; + sql_print_error("%s", err_buffer); + return 1; } + opt_secure_file_priv= secure_file_real_path; } - + + if (!check_secure_file_priv_path()) + return 1; + return 0; } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 0696021cfc09b..d9fda85d8f669 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -68,6 +68,8 @@ char internal_table_name[2]= "*"; char empty_c_string[1]= {0}; /* used for not defined db */ +LEX_STRING EMPTY_STR= { (char *) "", 0 }; + const char * const THD::DEFAULT_WHERE= "field list"; diff --git a/sql/sql_class.h b/sql/sql_class.h index dcc7458ee5043..aa6745e4564ea 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -105,6 +105,7 @@ enum enum_filetype { FILETYPE_CSV, FILETYPE_XML }; extern char internal_table_name[2]; extern char empty_c_string[1]; +extern LEX_STRING EMPTY_STR; extern MYSQL_PLUGIN_IMPORT const char **errmesg; extern bool volatile shutdown_in_progress; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index d08cb4f8ca838..6fd728d638dea 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1941,8 +1941,12 @@ static Sys_var_charptr Sys_secure_file_priv( "secure_file_priv", "Limit LOAD DATA, SELECT ... 
OUTFILE, and LOAD_FILE() to files " "within specified directory", - PREALLOCATED READ_ONLY GLOBAL_VAR(opt_secure_file_priv), - CMD_LINE(REQUIRED_ARG), IN_FS_CHARSET, DEFAULT(0)); + READ_ONLY GLOBAL_VAR(opt_secure_file_priv), +#ifndef EMBEDDED_LIBRARY + CMD_LINE(REQUIRED_ARG), IN_FS_CHARSET, DEFAULT(DEFAULT_SECURE_FILE_PRIV_DIR)); +#else + CMD_LINE(REQUIRED_ARG), IN_FS_CHARSET, DEFAULT(DEFAULT_SECURE_FILE_PRIV_EMBEDDED_DIR)); +#endif static bool fix_server_id(sys_var *self, THD *thd, enum_var_type type) { diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 5af4783f9195a..211ed4f38881d 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -1,4 +1,4 @@ -# Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -562,6 +562,7 @@ install -d $RBR%{_includedir} install -d $RBR%{_libdir} install -d $RBR%{_mandir} install -d $RBR%{_sbindir} +install -d $RBR/var/lib/mysql-files mkdir -p $RBR%{_sysconfdir}/my.cnf.d @@ -1141,6 +1142,7 @@ echo "=====" >> $STATUS_HISTORY %attr(755, root, root) %{_sysconfdir}/init.d/mysql %attr(755, root, root) %{_datadir}/mysql/ +%dir %attr(750, mysql, mysql) /var/lib/mysql-files # ---------------------------------------------------------------------------- %files -n MySQL-client%{product_suffix} @@ -1226,6 +1228,9 @@ echo "=====" >> $STATUS_HISTORY # merging BK trees) ############################################################################## %changelog +* Mon Sep 26 2016 Balasubramanian Kandasamy +- Include mysql-files directory + * Wed Jul 02 2014 Bjorn Munch - Disable dtrace unconditionally, breaks after we install Oracle dtrace From 5c6169fb309981b564a17bee31b367a18866d674 Mon Sep 17 00:00:00 2001 From: Robert Golebiowski Date: Tue, 27 Sep 2016 
11:17:38 +0200 Subject: [PATCH 05/44] Bug #24740291: YASSL UPDATE TO 2.4.2 --- extra/yassl/README | 18 +++ extra/yassl/certs/dsa-cert.pem | 38 ++--- extra/yassl/include/openssl/ssl.h | 2 +- extra/yassl/src/ssl.cpp | 60 +++++--- extra/yassl/taocrypt/include/aes.hpp | 58 ++++++++ extra/yassl/taocrypt/include/integer.hpp | 3 + extra/yassl/taocrypt/src/aes.cpp | 172 ++++++++++++++--------- extra/yassl/taocrypt/src/asn.cpp | 24 ++-- extra/yassl/taocrypt/src/dsa.cpp | 16 ++- extra/yassl/taocrypt/test/test.cpp | 3 + extra/yassl/testsuite/test.hpp | 2 +- 11 files changed, 274 insertions(+), 122 deletions(-) diff --git a/extra/yassl/README b/extra/yassl/README index b5eb88824fb0d..a3d4f60f56128 100644 --- a/extra/yassl/README +++ b/extra/yassl/README @@ -12,6 +12,24 @@ before calling SSL_new(); *** end Note *** +yaSSL Release notes, version 2.4.2 (9/22/2016) + This release of yaSSL fixes a medium security vulnerability. A fix for + potential AES side channel leaks is included that a local user monitoring + the same CPU core cache could exploit. VM users, hyper-threading users, + and users where potential attackers have access to the CPU cache will need + to update if they utilize AES. + + DSA padding fixes for unusual sizes is included as well. Users with DSA + certficiates should update. + +yaSSL Release notes, version 2.4.0 (5/20/2016) + This release of yaSSL fixes the OpenSSL compatibility function + SSL_CTX_load_verify_locations() when using the path directory to allow + unlimited path sizes. Minor Windows build fixes are included. + No high level security fixes in this version but we always recommend + updating. 
+ + yaSSL Release notes, version 2.3.9b (2/03/2016) This release of yaSSL fixes the OpenSSL compatibility function X509_NAME_get_index_by_NID() to use the actual index of the common name diff --git a/extra/yassl/certs/dsa-cert.pem b/extra/yassl/certs/dsa-cert.pem index 10d533edc88b0..10794cbee7313 100644 --- a/extra/yassl/certs/dsa-cert.pem +++ b/extra/yassl/certs/dsa-cert.pem @@ -1,22 +1,22 @@ -----BEGIN CERTIFICATE----- -MIIDqzCCA2ugAwIBAgIJAMGqrgDU6DyhMAkGByqGSM44BAMwgY4xCzAJBgNVBAYT +MIIDrzCCA2+gAwIBAgIJAK1zRM7YFcNjMAkGByqGSM44BAMwgZAxCzAJBgNVBAYT AlVTMQ8wDQYDVQQIDAZPcmVnb24xETAPBgNVBAcMCFBvcnRsYW5kMRAwDgYDVQQK -DAd3b2xmU1NMMRAwDgYDVQQLDAd0ZXN0aW5nMRYwFAYDVQQDDA13d3cueWFzc2wu -Y29tMR8wHQYJKoZIhvcNAQkBFhBpbmZvQHdvbGZzc2wuY29tMB4XDTEzMDQyMjIw -MDk0NFoXDTE2MDExNzIwMDk0NFowgY4xCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZP -cmVnb24xETAPBgNVBAcMCFBvcnRsYW5kMRAwDgYDVQQKDAd3b2xmU1NMMRAwDgYD -VQQLDAd0ZXN0aW5nMRYwFAYDVQQDDA13d3cueWFzc2wuY29tMR8wHQYJKoZIhvcN -AQkBFhBpbmZvQHdvbGZzc2wuY29tMIIBuDCCASwGByqGSM44BAEwggEfAoGBAL1R -7koy4IrH6sbh6nDEUUPPKgfhxxLCWCVexF2+qzANEr+hC9M002haJXFOfeS9DyoO -WFbL0qMZOuqv+22CaHnoUWl7q3PjJOAI3JH0P54ZyUPuU1909RzgTdIDp5+ikbr7 -KYjnltL73FQVMbjTZQKthIpPn3MjYcF+4jp2W2zFAhUAkcntYND6MGf+eYzIJDN2 -L7SonHUCgYEAklpxErfqznIZjVvqqHFaq+mgAL5J8QrKVmdhYZh/Y8z4jCjoCA8o -TDoFKxf7s2ZzgaPKvglaEKiYqLqic9qY78DYJswzQMLFvjsF4sFZ+pYCBdWPQI4N -PgxCiznK6Ce+JH9ikSBvMvG+tevjr2UpawDIHX3+AWYaZBZwKADAaboDgYUAAoGB -AJ3LY89yHyvQ/TsQ6zlYbovjbk/ogndsMqPdNUvL4RuPTgJP/caaDDa0XJ7ak6A7 -TJ+QheLNwOXoZPYJC4EGFSDAXpYniGhbWIrVTCGe6lmZDfnx40WXS0kk3m/DHaC0 -3ElLAiybxVGxyqoUfbT3Zv1JwftWMuiqHH5uADhdXuXVo1AwTjAdBgNVHQ4EFgQU -IJjk416o4v8qpH9LBtXlR9v8gccwHwYDVR0jBBgwFoAUIJjk416o4v8qpH9LBtXl -R9v8gccwDAYDVR0TBAUwAwEB/zAJBgcqhkjOOAQDAy8AMCwCFCjGKIdOSV12LcTu -k08owGM6YkO1AhQe+K173VuaO/OsDNsxZlKpyH8+1g== +DAd3b2xmU1NMMRAwDgYDVQQLDAd0ZXN0aW5nMRgwFgYDVQQDDA93d3cud29sZnNz +bC5jb20xHzAdBgkqhkiG9w0BCQEWEGluZm9Ad29sZnNzbC5jb20wHhcNMTYwOTIy +MjEyMzA0WhcNMjIwMzE1MjEyMzA0WjCBkDELMAkGA1UEBhMCVVMxDzANBgNVBAgM 
+Bk9yZWdvbjERMA8GA1UEBwwIUG9ydGxhbmQxEDAOBgNVBAoMB3dvbGZTU0wxEDAO +BgNVBAsMB3Rlc3RpbmcxGDAWBgNVBAMMD3d3dy53b2xmc3NsLmNvbTEfMB0GCSqG +SIb3DQEJARYQaW5mb0B3b2xmc3NsLmNvbTCCAbgwggEsBgcqhkjOOAQBMIIBHwKB +gQC9Ue5KMuCKx+rG4epwxFFDzyoH4ccSwlglXsRdvqswDRK/oQvTNNNoWiVxTn3k +vQ8qDlhWy9KjGTrqr/ttgmh56FFpe6tz4yTgCNyR9D+eGclD7lNfdPUc4E3SA6ef +opG6+ymI55bS+9xUFTG402UCrYSKT59zI2HBfuI6dltsxQIVAJHJ7WDQ+jBn/nmM +yCQzdi+0qJx1AoGBAJJacRK36s5yGY1b6qhxWqvpoAC+SfEKylZnYWGYf2PM+Iwo +6AgPKEw6BSsX+7Nmc4Gjyr4JWhComKi6onPamO/A2CbMM0DCxb47BeLBWfqWAgXV +j0CODT4MQos5yugnviR/YpEgbzLxvrXr469lKWsAyB19/gFmGmQWcCgAwGm6A4GF +AAKBgQCdy2PPch8r0P07EOs5WG6L425P6IJ3bDKj3TVLy+Ebj04CT/3Gmgw2tFye +2pOgO0yfkIXizcDl6GT2CQuBBhUgwF6WJ4hoW1iK1UwhnupZmQ358eNFl0tJJN5v +wx2gtNxJSwIsm8VRscqqFH2092b9ScH7VjLoqhx+bgA4XV7l1aNQME4wHQYDVR0O +BBYEFCCY5ONeqOL/KqR/SwbV5Ufb/IHHMB8GA1UdIwQYMBaAFCCY5ONeqOL/KqR/ +SwbV5Ufb/IHHMAwGA1UdEwQFMAMBAf8wCQYHKoZIzjgEAwMvADAsAhQRYSCVN/Ge +agV3mffU3qNZ92fI0QIUPH7Jp+iASI7U1ocaYDc10qXGaGY= -----END CERTIFICATE----- diff --git a/extra/yassl/include/openssl/ssl.h b/extra/yassl/include/openssl/ssl.h index 83daf3cc81f9e..0609dfc0592f5 100644 --- a/extra/yassl/include/openssl/ssl.h +++ b/extra/yassl/include/openssl/ssl.h @@ -35,7 +35,7 @@ #include "rsa.h" -#define YASSL_VERSION "2.3.9b" +#define YASSL_VERSION "2.4.2" #if defined(__cplusplus) diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp index cde32df4f43f4..1925e2f759267 100644 --- a/extra/yassl/src/ssl.cpp +++ b/extra/yassl/src/ssl.cpp @@ -161,7 +161,7 @@ int read_file(SSL_CTX* ctx, const char* file, int format, CertType type) TaoCrypt::DSA_PrivateKey dsaKey; dsaKey.Initialize(dsaSource); - if (rsaSource.GetError().What()) { + if (dsaSource.GetError().What()) { // neither worked ret = SSL_FAILURE; } @@ -784,40 +784,67 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file, WIN32_FIND_DATA FindFileData; HANDLE hFind; - char name[MAX_PATH + 1]; // directory specification - strncpy(name, path, MAX_PATH - 3); - 
strncat(name, "\\*", 3); + const int DELIMITER_SZ = 2; + const int DELIMITER_STAR_SZ = 3; + int pathSz = (int)strlen(path); + int nameSz = pathSz + DELIMITER_STAR_SZ + 1; // plus 1 for terminator + char* name = NEW_YS char[nameSz]; // directory specification + memset(name, 0, nameSz); + strncpy(name, path, nameSz - DELIMITER_STAR_SZ - 1); + strncat(name, "\\*", DELIMITER_STAR_SZ); hFind = FindFirstFile(name, &FindFileData); - if (hFind == INVALID_HANDLE_VALUE) return SSL_BAD_PATH; + if (hFind == INVALID_HANDLE_VALUE) { + ysArrayDelete(name); + return SSL_BAD_PATH; + } do { - if (FindFileData.dwFileAttributes != FILE_ATTRIBUTE_DIRECTORY) { - strncpy(name, path, MAX_PATH - 2 - HALF_PATH); - strncat(name, "\\", 2); - strncat(name, FindFileData.cFileName, HALF_PATH); + if (!(FindFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { + int curSz = (int)strlen(FindFileData.cFileName); + if (pathSz + curSz + DELIMITER_SZ + 1 > nameSz) { + ysArrayDelete(name); + // plus 1 for terminator + nameSz = pathSz + curSz + DELIMITER_SZ + 1; + name = NEW_YS char[nameSz]; + } + memset(name, 0, nameSz); + strncpy(name, path, nameSz - curSz - DELIMITER_SZ - 1); + strncat(name, "\\", DELIMITER_SZ); + strncat(name, FindFileData.cFileName, + nameSz - pathSz - DELIMITER_SZ - 1); ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA); } } while (ret == SSL_SUCCESS && FindNextFile(hFind, &FindFileData)); + ysArrayDelete(name); FindClose(hFind); #else // _WIN32 - - const int MAX_PATH = 260; - DIR* dir = opendir(path); if (!dir) return SSL_BAD_PATH; struct dirent* entry; struct stat buf; - char name[MAX_PATH + 1]; + const int DELIMITER_SZ = 1; + int pathSz = (int)strlen(path); + int nameSz = pathSz + DELIMITER_SZ + 1; //plus 1 for null terminator + char* name = NEW_YS char[nameSz]; // directory specification while (ret == SSL_SUCCESS && (entry = readdir(dir))) { - strncpy(name, path, MAX_PATH - 1 - HALF_PATH); - strncat(name, "/", 1); - strncat(name, entry->d_name, HALF_PATH); + int curSz = 
(int)strlen(entry->d_name); + if (pathSz + curSz + DELIMITER_SZ + 1 > nameSz) { + ysArrayDelete(name); + nameSz = pathSz + DELIMITER_SZ + curSz + 1; + name = NEW_YS char[nameSz]; + } + memset(name, 0, nameSz); + strncpy(name, path, nameSz - curSz - 1); + strncat(name, "/", DELIMITER_SZ); + strncat(name, entry->d_name, nameSz - pathSz - DELIMITER_SZ - 1); + if (stat(name, &buf) < 0) { + ysArrayDelete(name); closedir(dir); return SSL_BAD_STAT; } @@ -826,6 +853,7 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file, ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA); } + ysArrayDelete(name); closedir(dir); #endif diff --git a/extra/yassl/taocrypt/include/aes.hpp b/extra/yassl/taocrypt/include/aes.hpp index 017630331560b..bccf6e73fc720 100644 --- a/extra/yassl/taocrypt/include/aes.hpp +++ b/extra/yassl/taocrypt/include/aes.hpp @@ -60,6 +60,7 @@ class AES : public Mode_BASE { static const word32 Te[5][256]; static const word32 Td[5][256]; + static const byte CTd4[256]; static const word32* Te0; static const word32* Te1; @@ -80,11 +81,68 @@ class AES : public Mode_BASE { void ProcessAndXorBlock(const byte*, const byte*, byte*) const; + word32 PreFetchTe() const; + word32 PreFetchTd() const; + word32 PreFetchCTd4() const; + AES(const AES&); // hide copy AES& operator=(const AES&); // and assign }; +#if defined(__x86_64__) || defined(_M_X64) || \ + (defined(__ILP32__) && (__ILP32__ >= 1)) + #define TC_CACHE_LINE_SZ 64 +#else + /* default cache line size */ + #define TC_CACHE_LINE_SZ 32 +#endif + +inline word32 AES::PreFetchTe() const +{ + word32 x = 0; + + /* 4 tables of 256 entries */ + for (int i = 0; i < 4; i++) { + /* each entry is 4 bytes */ + for (int j = 0; j < 256; j += TC_CACHE_LINE_SZ/4) { + x &= Te[i][j]; + } + } + + return x; +} + + +inline word32 AES::PreFetchTd() const +{ + word32 x = 0; + + /* 4 tables of 256 entries */ + for (int i = 0; i < 4; i++) { + /* each entry is 4 bytes */ + for (int j = 0; j < 256; j += TC_CACHE_LINE_SZ/4) { + x &= 
Td[i][j]; + } + } + + return x; +} + + +inline word32 AES::PreFetchCTd4() const +{ + word32 x = 0; + int i; + + for (i = 0; i < 256; i += TC_CACHE_LINE_SZ) { + x &= CTd4[i]; + } + + return x; +} + + typedef BlockCipher AES_ECB_Encryption; typedef BlockCipher AES_ECB_Decryption; diff --git a/extra/yassl/taocrypt/include/integer.hpp b/extra/yassl/taocrypt/include/integer.hpp index 75a3ee3d3df80..05fe189fd585f 100644 --- a/extra/yassl/taocrypt/include/integer.hpp +++ b/extra/yassl/taocrypt/include/integer.hpp @@ -119,6 +119,9 @@ namespace TaoCrypt { +#ifdef _WIN32 + #undef max // avoid name clash +#endif // general MAX template inline const T& max(const T& a, const T& b) diff --git a/extra/yassl/taocrypt/src/aes.cpp b/extra/yassl/taocrypt/src/aes.cpp index ee4c7a6e8a1d3..3fcf80ac20207 100644 --- a/extra/yassl/taocrypt/src/aes.cpp +++ b/extra/yassl/taocrypt/src/aes.cpp @@ -109,10 +109,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/) { temp = rk[3]; rk[4] = rk[0] ^ - (Te4[GETBYTE(temp, 2)] & 0xff000000) ^ - (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^ - (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^ - (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^ + (Te2[GETBYTE(temp, 2)] & 0xff000000) ^ + (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^ + (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^ + (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^ rcon_[i]; rk[5] = rk[1] ^ rk[4]; rk[6] = rk[2] ^ rk[5]; @@ -128,10 +128,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/) { temp = rk[ 5]; rk[ 6] = rk[ 0] ^ - (Te4[GETBYTE(temp, 2)] & 0xff000000) ^ - (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^ - (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^ - (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^ + (Te2[GETBYTE(temp, 2)] & 0xff000000) ^ + (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^ + (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^ + (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^ rcon_[i]; rk[ 7] = rk[ 1] ^ rk[ 6]; rk[ 8] = rk[ 2] ^ rk[ 7]; @@ -149,10 +149,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir 
/*dummy*/) { temp = rk[ 7]; rk[ 8] = rk[ 0] ^ - (Te4[GETBYTE(temp, 2)] & 0xff000000) ^ - (Te4[GETBYTE(temp, 1)] & 0x00ff0000) ^ - (Te4[GETBYTE(temp, 0)] & 0x0000ff00) ^ - (Te4[GETBYTE(temp, 3)] & 0x000000ff) ^ + (Te2[GETBYTE(temp, 2)] & 0xff000000) ^ + (Te3[GETBYTE(temp, 1)] & 0x00ff0000) ^ + (Te0[GETBYTE(temp, 0)] & 0x0000ff00) ^ + (Te1[GETBYTE(temp, 3)] & 0x000000ff) ^ rcon_[i]; rk[ 9] = rk[ 1] ^ rk[ 8]; rk[10] = rk[ 2] ^ rk[ 9]; @@ -161,10 +161,10 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/) break; temp = rk[11]; rk[12] = rk[ 4] ^ - (Te4[GETBYTE(temp, 3)] & 0xff000000) ^ - (Te4[GETBYTE(temp, 2)] & 0x00ff0000) ^ - (Te4[GETBYTE(temp, 1)] & 0x0000ff00) ^ - (Te4[GETBYTE(temp, 0)] & 0x000000ff); + (Te2[GETBYTE(temp, 3)] & 0xff000000) ^ + (Te3[GETBYTE(temp, 2)] & 0x00ff0000) ^ + (Te0[GETBYTE(temp, 1)] & 0x0000ff00) ^ + (Te1[GETBYTE(temp, 0)] & 0x000000ff); rk[13] = rk[ 5] ^ rk[12]; rk[14] = rk[ 6] ^ rk[13]; rk[15] = rk[ 7] ^ rk[14]; @@ -191,25 +191,25 @@ void AES::SetKey(const byte* userKey, word32 keylen, CipherDir /*dummy*/) for (i = 1; i < rounds_; i++) { rk += 4; rk[0] = - Td0[Te4[GETBYTE(rk[0], 3)] & 0xff] ^ - Td1[Te4[GETBYTE(rk[0], 2)] & 0xff] ^ - Td2[Te4[GETBYTE(rk[0], 1)] & 0xff] ^ - Td3[Te4[GETBYTE(rk[0], 0)] & 0xff]; + Td0[Te1[GETBYTE(rk[0], 3)] & 0xff] ^ + Td1[Te1[GETBYTE(rk[0], 2)] & 0xff] ^ + Td2[Te1[GETBYTE(rk[0], 1)] & 0xff] ^ + Td3[Te1[GETBYTE(rk[0], 0)] & 0xff]; rk[1] = - Td0[Te4[GETBYTE(rk[1], 3)] & 0xff] ^ - Td1[Te4[GETBYTE(rk[1], 2)] & 0xff] ^ - Td2[Te4[GETBYTE(rk[1], 1)] & 0xff] ^ - Td3[Te4[GETBYTE(rk[1], 0)] & 0xff]; + Td0[Te1[GETBYTE(rk[1], 3)] & 0xff] ^ + Td1[Te1[GETBYTE(rk[1], 2)] & 0xff] ^ + Td2[Te1[GETBYTE(rk[1], 1)] & 0xff] ^ + Td3[Te1[GETBYTE(rk[1], 0)] & 0xff]; rk[2] = - Td0[Te4[GETBYTE(rk[2], 3)] & 0xff] ^ - Td1[Te4[GETBYTE(rk[2], 2)] & 0xff] ^ - Td2[Te4[GETBYTE(rk[2], 1)] & 0xff] ^ - Td3[Te4[GETBYTE(rk[2], 0)] & 0xff]; + Td0[Te1[GETBYTE(rk[2], 3)] & 0xff] ^ + Td1[Te1[GETBYTE(rk[2], 2)] & 0xff] ^ + 
Td2[Te1[GETBYTE(rk[2], 1)] & 0xff] ^ + Td3[Te1[GETBYTE(rk[2], 0)] & 0xff]; rk[3] = - Td0[Te4[GETBYTE(rk[3], 3)] & 0xff] ^ - Td1[Te4[GETBYTE(rk[3], 2)] & 0xff] ^ - Td2[Te4[GETBYTE(rk[3], 1)] & 0xff] ^ - Td3[Te4[GETBYTE(rk[3], 0)] & 0xff]; + Td0[Te1[GETBYTE(rk[3], 3)] & 0xff] ^ + Td1[Te1[GETBYTE(rk[3], 2)] & 0xff] ^ + Td2[Te1[GETBYTE(rk[3], 1)] & 0xff] ^ + Td3[Te1[GETBYTE(rk[3], 0)] & 0xff]; } } } @@ -244,6 +244,7 @@ void AES::encrypt(const byte* inBlock, const byte* xorBlock, s2 ^= rk[2]; s3 ^= rk[3]; + s0 |= PreFetchTe(); /* * Nr - 1 full rounds: */ @@ -312,28 +313,28 @@ void AES::encrypt(const byte* inBlock, const byte* xorBlock, */ s0 = - (Te4[GETBYTE(t0, 3)] & 0xff000000) ^ - (Te4[GETBYTE(t1, 2)] & 0x00ff0000) ^ - (Te4[GETBYTE(t2, 1)] & 0x0000ff00) ^ - (Te4[GETBYTE(t3, 0)] & 0x000000ff) ^ + (Te2[GETBYTE(t0, 3)] & 0xff000000) ^ + (Te3[GETBYTE(t1, 2)] & 0x00ff0000) ^ + (Te0[GETBYTE(t2, 1)] & 0x0000ff00) ^ + (Te1[GETBYTE(t3, 0)] & 0x000000ff) ^ rk[0]; s1 = - (Te4[GETBYTE(t1, 3)] & 0xff000000) ^ - (Te4[GETBYTE(t2, 2)] & 0x00ff0000) ^ - (Te4[GETBYTE(t3, 1)] & 0x0000ff00) ^ - (Te4[GETBYTE(t0, 0)] & 0x000000ff) ^ + (Te2[GETBYTE(t1, 3)] & 0xff000000) ^ + (Te3[GETBYTE(t2, 2)] & 0x00ff0000) ^ + (Te0[GETBYTE(t3, 1)] & 0x0000ff00) ^ + (Te1[GETBYTE(t0, 0)] & 0x000000ff) ^ rk[1]; s2 = - (Te4[GETBYTE(t2, 3)] & 0xff000000) ^ - (Te4[GETBYTE(t3, 2)] & 0x00ff0000) ^ - (Te4[GETBYTE(t0, 1)] & 0x0000ff00) ^ - (Te4[GETBYTE(t1, 0)] & 0x000000ff) ^ + (Te2[GETBYTE(t2, 3)] & 0xff000000) ^ + (Te3[GETBYTE(t3, 2)] & 0x00ff0000) ^ + (Te0[GETBYTE(t0, 1)] & 0x0000ff00) ^ + (Te1[GETBYTE(t1, 0)] & 0x000000ff) ^ rk[2]; s3 = - (Te4[GETBYTE(t3, 3)] & 0xff000000) ^ - (Te4[GETBYTE(t0, 2)] & 0x00ff0000) ^ - (Te4[GETBYTE(t1, 1)] & 0x0000ff00) ^ - (Te4[GETBYTE(t2, 0)] & 0x000000ff) ^ + (Te2[GETBYTE(t3, 3)] & 0xff000000) ^ + (Te3[GETBYTE(t0, 2)] & 0x00ff0000) ^ + (Te0[GETBYTE(t1, 1)] & 0x0000ff00) ^ + (Te1[GETBYTE(t2, 0)] & 0x000000ff) ^ rk[3]; @@ -358,6 +359,8 @@ void AES::decrypt(const byte* inBlock, 
const byte* xorBlock, s2 ^= rk[2]; s3 ^= rk[3]; + s0 |= PreFetchTd(); + /* * Nr - 1 full rounds: */ @@ -423,29 +426,32 @@ void AES::decrypt(const byte* inBlock, const byte* xorBlock, * apply last round and * map cipher state to byte array block: */ + + t0 |= PreFetchCTd4(); + s0 = - (Td4[GETBYTE(t0, 3)] & 0xff000000) ^ - (Td4[GETBYTE(t3, 2)] & 0x00ff0000) ^ - (Td4[GETBYTE(t2, 1)] & 0x0000ff00) ^ - (Td4[GETBYTE(t1, 0)] & 0x000000ff) ^ + ((word32)CTd4[GETBYTE(t0, 3)] << 24) ^ + ((word32)CTd4[GETBYTE(t3, 2)] << 16) ^ + ((word32)CTd4[GETBYTE(t2, 1)] << 8) ^ + ((word32)CTd4[GETBYTE(t1, 0)]) ^ rk[0]; s1 = - (Td4[GETBYTE(t1, 3)] & 0xff000000) ^ - (Td4[GETBYTE(t0, 2)] & 0x00ff0000) ^ - (Td4[GETBYTE(t3, 1)] & 0x0000ff00) ^ - (Td4[GETBYTE(t2, 0)] & 0x000000ff) ^ + ((word32)CTd4[GETBYTE(t1, 3)] << 24) ^ + ((word32)CTd4[GETBYTE(t0, 2)] << 16) ^ + ((word32)CTd4[GETBYTE(t3, 1)] << 8) ^ + ((word32)CTd4[GETBYTE(t2, 0)]) ^ rk[1]; s2 = - (Td4[GETBYTE(t2, 3)] & 0xff000000) ^ - (Td4[GETBYTE(t1, 2)] & 0x00ff0000) ^ - (Td4[GETBYTE(t0, 1)] & 0x0000ff00) ^ - (Td4[GETBYTE(t3, 0)] & 0x000000ff) ^ + ((word32)CTd4[GETBYTE(t2, 3)] << 24 ) ^ + ((word32)CTd4[GETBYTE(t1, 2)] << 16 ) ^ + ((word32)CTd4[GETBYTE(t0, 1)] << 8 ) ^ + ((word32)CTd4[GETBYTE(t3, 0)]) ^ rk[2]; s3 = - (Td4[GETBYTE(t3, 3)] & 0xff000000) ^ - (Td4[GETBYTE(t2, 2)] & 0x00ff0000) ^ - (Td4[GETBYTE(t1, 1)] & 0x0000ff00) ^ - (Td4[GETBYTE(t0, 0)] & 0x000000ff) ^ + ((word32)CTd4[GETBYTE(t3, 3)] << 24) ^ + ((word32)CTd4[GETBYTE(t2, 2)] << 16) ^ + ((word32)CTd4[GETBYTE(t1, 1)] << 8) ^ + ((word32)CTd4[GETBYTE(t0, 0)]) ^ rk[3]; gpBlock::Put(xorBlock, outBlock)(s0)(s1)(s2)(s3); @@ -1826,18 +1832,52 @@ const word32 AES::Td[5][256] = { } }; +const byte AES::CTd4[256] = +{ + 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, + 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, + 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U, + 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, + 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 
0x23U, 0x3dU, + 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU, + 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, + 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, + 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, + 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U, + 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, + 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, + 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, + 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U, + 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, + 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, + 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, + 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U, + 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, + 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, + 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, + 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU, + 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, + 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, + 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, + 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU, + 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, + 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU, + 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, + 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, + 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, + 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU, +}; + const word32* AES::Te0 = AES::Te[0]; const word32* AES::Te1 = AES::Te[1]; const word32* AES::Te2 = AES::Te[2]; const word32* AES::Te3 = AES::Te[3]; -const word32* AES::Te4 = AES::Te[4]; const word32* AES::Td0 = AES::Td[0]; const word32* AES::Td1 = AES::Td[1]; const word32* AES::Td2 = AES::Td[2]; const word32* AES::Td3 = AES::Td[3]; -const word32* AES::Td4 = AES::Td[4]; diff --git 
a/extra/yassl/taocrypt/src/asn.cpp b/extra/yassl/taocrypt/src/asn.cpp index a210d805452f3..7ff3c7167d23b 100644 --- a/extra/yassl/taocrypt/src/asn.cpp +++ b/extra/yassl/taocrypt/src/asn.cpp @@ -1209,17 +1209,17 @@ word32 DecodeDSA_Signature(byte* decoded, const byte* encoded, word32 sz) } word32 rLen = GetLength(source); if (rLen != 20) { - if (rLen == 21) { // zero at front, eat + while (rLen > 20 && source.remaining() > 0) { // zero's at front, eat source.next(); --rLen; } - else if (rLen == 19) { // add zero to front so 20 bytes + if (rLen < 20) { // add zero's to front so 20 bytes + word32 tmpLen = rLen; + while (tmpLen < 20) { decoded[0] = 0; decoded++; + tmpLen++; } - else { - source.SetError(DSA_SZ_E); - return 0; } } memcpy(decoded, source.get_buffer() + source.get_index(), rLen); @@ -1232,17 +1232,17 @@ word32 DecodeDSA_Signature(byte* decoded, const byte* encoded, word32 sz) } word32 sLen = GetLength(source); if (sLen != 20) { - if (sLen == 21) { - source.next(); // zero at front, eat + while (sLen > 20 && source.remaining() > 0) { + source.next(); // zero's at front, eat --sLen; } - else if (sLen == 19) { - decoded[rLen] = 0; // add zero to front so 20 bytes + if (sLen < 20) { // add zero's to front so 20 bytes + word32 tmpLen = sLen; + while (tmpLen < 20) { + decoded[rLen] = 0; decoded++; + tmpLen++; } - else { - source.SetError(DSA_SZ_E); - return 0; } } memcpy(decoded + rLen, source.get_buffer() + source.get_index(), sLen); diff --git a/extra/yassl/taocrypt/src/dsa.cpp b/extra/yassl/taocrypt/src/dsa.cpp index bf116d3e48d12..b19fed9235b24 100644 --- a/extra/yassl/taocrypt/src/dsa.cpp +++ b/extra/yassl/taocrypt/src/dsa.cpp @@ -172,6 +172,7 @@ word32 DSA_Signer::Sign(const byte* sha_digest, byte* sig, const Integer& q = key_.GetSubGroupOrder(); const Integer& g = key_.GetSubGroupGenerator(); const Integer& x = key_.GetPrivatePart(); + byte* tmpPtr = sig; // initial signature output Integer k(rng, 1, q - 1); @@ -187,22 +188,23 @@ word32 
DSA_Signer::Sign(const byte* sha_digest, byte* sig, return -1; int rSz = r_.ByteCount(); + int tmpSz = rSz; - if (rSz == 19) { - sig[0] = 0; - sig++; + while (tmpSz++ < SHA::DIGEST_SIZE) { + *sig++ = 0; } r_.Encode(sig, rSz); + sig = tmpPtr + SHA::DIGEST_SIZE; // advance sig output to s int sSz = s_.ByteCount(); + tmpSz = sSz; - if (sSz == 19) { - sig[rSz] = 0; - sig++; + while (tmpSz++ < SHA::DIGEST_SIZE) { + *sig++ = 0; } - s_.Encode(sig + rSz, sSz); + s_.Encode(sig, sSz); return 40; } diff --git a/extra/yassl/taocrypt/test/test.cpp b/extra/yassl/taocrypt/test/test.cpp index a7d5cb3e8af35..fc1f0e8762ddc 100644 --- a/extra/yassl/taocrypt/test/test.cpp +++ b/extra/yassl/taocrypt/test/test.cpp @@ -1277,6 +1277,9 @@ int dsa_test() if (!verifier.Verify(digest, decoded)) return -90; + if (!verifier.Verify(digest, signature)) + return -91; + return 0; } diff --git a/extra/yassl/testsuite/test.hpp b/extra/yassl/testsuite/test.hpp index 5c9dc7ce117ef..e2e44c24027df 100644 --- a/extra/yassl/testsuite/test.hpp +++ b/extra/yassl/testsuite/test.hpp @@ -22,7 +22,6 @@ #define yaSSL_TEST_HPP #include "runtime.hpp" -#include "openssl/ssl.h" /* openssl compatibility test */ #include "error.hpp" #include #include @@ -56,6 +55,7 @@ #endif #define SOCKET_T int #endif /* _WIN32 */ +#include "openssl/ssl.h" /* openssl compatibility test */ #ifdef _MSC_VER From 9b20d606fb1afd0327356e7c78c2aea774dec3d4 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 5 Oct 2016 23:44:54 +0200 Subject: [PATCH 06/44] - Fix MDEV-10948. Syntax error on quoted JDBC tables. Was because the quoting character was always '"' instead of being retrieve from the JDBC source. 
modified: storage/connect/JdbcInterface.java modified: storage/connect/jdbconn.cpp modified: storage/connect/tabjdbc.cpp --- storage/connect/JdbcInterface.java | 12 ++++++++++++ storage/connect/jdbconn.cpp | 15 +++++++++++++++ storage/connect/tabjdbc.cpp | 19 +++++++++++++++---- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java index f765052915d72..e339c9891133a 100644 --- a/storage/connect/JdbcInterface.java +++ b/storage/connect/JdbcInterface.java @@ -340,6 +340,18 @@ public int GetMaxValue(int n) { return m; } // end of GetMaxValue + public String GetQuoteString() { + String qs = null; + + try { + qs = dbmd.getIdentifierQuoteString(); + } catch(SQLException se) { + SetErrmsg(se); + } // end try/catch + + return qs; + } // end of GetQuoteString + public int GetColumns(String[] parms) { int ncol = -1; diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 952847507a0b3..229ade53ad1cf 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -1011,6 +1011,21 @@ int JDBConn::Open(PJPARM sop) return RC_FX; } // endif Msg + jmethodID qcid = nullptr; + + if (!gmID(g, qcid, "GetQuoteString", "()Ljava/lang/String;")) { + jstring s = (jstring)env->CallObjectMethod(job, qcid); + + if (s != nullptr) { + char *qch = (char*)env->GetStringUTFChars(s, (jboolean)false); + m_IDQuoteChar[0] = *qch; + } else { + s = (jstring)env->CallObjectMethod(job, errid); + Msg = (char*)env->GetStringUTFChars(s, (jboolean)false); + } // endif s + + } // endif qcid + if (gmID(g, typid, "ColumnType", "(ILjava/lang/String;)I")) return RC_FX; else diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 86fd831b26201..e398523892f2e 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -686,6 +686,9 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) else Prepared = true; + if (trace) + htrc("Insert=%s\n", Query->GetStr()); + return false; } // 
end of MakeInsert @@ -733,17 +736,18 @@ bool TDBJDBC::MakeCommand(PGLOBAL g) // If so, it must be quoted in the original query strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); - if (!strstr(" update delete low_priority ignore quick from ", name)) - strlwr(strcpy(name, Name)); // Not a keyword - else + if (strstr(" update delete low_priority ignore quick from ", name)) { strlwr(strcat(strcat(strcpy(name, qc), Name), qc)); + k += 2; + } else + strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { for (i = 0; i < p - qrystr; i++) stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i]; stmt[i] = 0; - k = i + (int)strlen(Name); + k += i + (int)strlen(Name); if (qtd && *(p-1) == ' ') strcat(strcat(strcat(stmt, qc), TableName), qc); @@ -765,6 +769,9 @@ bool TDBJDBC::MakeCommand(PGLOBAL g) return NULL; } // endif p + if (trace) + htrc("Command=%s\n", stmt); + Query = new(g)STRING(g, 0, stmt); return (!Query->GetSize()); } // end of MakeCommand @@ -1214,6 +1221,10 @@ int TDBJDBC::WriteDB(PGLOBAL g) } // endif oom Query->RepLast(')'); + + if (trace > 1) + htrc("Inserting: %s\n", Query->GetStr()); + rc = Jcp->ExecuteUpdate(Query->GetStr()); Query->Truncate(len); // Restore query From 6010a27c87785643f8880d19c0dced3b724c54da Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 13 Oct 2016 12:23:16 +0200 Subject: [PATCH 07/44] 5.5.52-38.3 --- storage/xtradb/btr/btr0btr.c | 4 +- storage/xtradb/handler/ha_innodb.cc | 108 +++++++++++++++++++++++----- storage/xtradb/include/buf0buf.h | 12 ++++ storage/xtradb/include/buf0buf.ic | 14 ++++ storage/xtradb/include/srv0srv.h | 8 +++ storage/xtradb/include/univ.i | 2 +- storage/xtradb/log/log0log.c | 8 ++- storage/xtradb/log/log0online.c | 12 ++-- storage/xtradb/log/log0recv.c | 2 +- storage/xtradb/mach/mach0data.c | 13 +++- storage/xtradb/srv/srv0srv.c | 42 +++++++---- 11 files changed, 179 insertions(+), 46 deletions(-) diff --git a/storage/xtradb/btr/btr0btr.c b/storage/xtradb/btr/btr0btr.c index 
dec42f27d3b99..0c4293637897f 100644 --- a/storage/xtradb/btr/btr0btr.c +++ b/storage/xtradb/btr/btr0btr.c @@ -76,7 +76,7 @@ btr_corruption_report( buf_block_get_zip_size(block), BUF_PAGE_PRINT_NO_CRASH); } - buf_page_print(buf_block_get_frame(block), 0, 0); + buf_page_print(buf_nonnull_block_get_frame(block), 0, 0); } #ifndef UNIV_HOTBACKUP @@ -1077,7 +1077,7 @@ btr_get_size( SRV_CORRUPT_TABLE_CHECK(root, { mtr_commit(mtr); - return(0); + return(ULINT_UNDEFINED); }); if (flag == BTR_N_LEAF_PAGES) { diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 896b27a204785..010aec1ea0db5 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -475,6 +475,19 @@ innobase_is_fake_change( THD* thd); /*!< in: MySQL thread handle of the user for whom the transaction is being committed */ +/** Get the list of foreign keys referencing a specified table +table. +@param thd The thread handle +@param path Path to the table +@param f_key_list[out] The list of foreign keys + +@return error code or zero for success */ +static +int +innobase_get_parent_fk_list( + THD* thd, + const char* path, + List* f_key_list); /******************************************************************//** Maps a MySQL trx isolation level code to the InnoDB isolation level code @@ -2710,6 +2723,7 @@ innobase_init( innobase_hton->purge_changed_page_bitmaps = innobase_purge_changed_page_bitmaps; innobase_hton->is_fake_change = innobase_is_fake_change; + innobase_hton->get_parent_fk_list = innobase_get_parent_fk_list; ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); @@ -9721,7 +9735,14 @@ ha_innobase::check( prebuilt->select_lock_type = LOCK_NONE; - if (!row_check_index_for_mysql(prebuilt, index, &n_rows)) { + bool check_result + = row_check_index_for_mysql(prebuilt, index, &n_rows); + DBUG_EXECUTE_IF( + "dict_set_index_corrupted", + if (!(index->type & DICT_CLUSTERED)) { + check_result = false; + }); + if (!check_result) { 
innobase_format_name( index_name, sizeof index_name, index->name, TRUE); @@ -10057,6 +10078,73 @@ get_foreign_key_info( return(pf_key_info); } +/** Get the list of foreign keys referencing a specified table +table. +@param thd The thread handle +@param path Path to the table +@param f_key_list[out] The list of foreign keys */ +static +void +fill_foreign_key_list(THD* thd, + const dict_table_t* table, + List* f_key_list) +{ + ut_ad(mutex_own(&dict_sys->mutex)); + + for (dict_foreign_t* foreign + = UT_LIST_GET_FIRST(table->referenced_list); + foreign != NULL; + foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + + FOREIGN_KEY_INFO* pf_key_info + = get_foreign_key_info(thd, foreign); + if (pf_key_info) { + f_key_list->push_back(pf_key_info); + } + } +} + +/** Get the list of foreign keys referencing a specified table +table. +@param thd The thread handle +@param path Path to the table +@param f_key_list[out] The list of foreign keys + +@return error code or zero for success */ +static +int +innobase_get_parent_fk_list( + THD* thd, + const char* path, + List* f_key_list) +{ + ut_a(strlen(path) <= FN_REFLEN); + char norm_name[FN_REFLEN + 1]; + normalize_table_name(norm_name, path); + + trx_t* parent_trx = check_trx_exists(thd); + parent_trx->op_info = "getting list of referencing foreign keys"; + trx_search_latch_release_if_reserved(parent_trx); + + mutex_enter(&dict_sys->mutex); + + dict_table_t* table + = dict_table_get_low(norm_name, + static_cast( + DICT_ERR_IGNORE_INDEX_ROOT + | DICT_ERR_IGNORE_CORRUPT)); + if (!table) { + mutex_exit(&dict_sys->mutex); + return(HA_ERR_NO_SUCH_TABLE); + } + + fill_foreign_key_list(thd, table, f_key_list); + + mutex_exit(&dict_sys->mutex); + parent_trx->op_info = ""; + return(0); +} + /*******************************************************************//** Gets the list of foreign keys in this table. 
@return always 0, that is, always succeeds */ @@ -10105,9 +10193,6 @@ ha_innobase::get_parent_foreign_key_list( THD* thd, /*!< in: user thread handle */ List* f_key_list) /*!< out: foreign key list */ { - FOREIGN_KEY_INFO* pf_key_info; - dict_foreign_t* foreign; - ut_a(prebuilt != NULL); update_thd(ha_thd()); @@ -10116,16 +10201,7 @@ ha_innobase::get_parent_foreign_key_list( trx_search_latch_release_if_reserved(prebuilt->trx); mutex_enter(&(dict_sys->mutex)); - - for (foreign = UT_LIST_GET_FIRST(prebuilt->table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { - pf_key_info = get_foreign_key_info(thd, foreign); - if (pf_key_info) { - f_key_list->push_back(pf_key_info); - } - } - + fill_foreign_key_list(thd, prebuilt->table, f_key_list); mutex_exit(&(dict_sys->mutex)); prebuilt->trx->op_info = ""; @@ -12539,7 +12615,6 @@ innodb_track_changed_pages_validate( for update function */ struct st_mysql_value* value) /*!< in: incoming bool */ { - static bool enabled_on_startup = false; long long intbuf = 0; if (value->val_int(value, &intbuf)) { @@ -12547,8 +12622,7 @@ innodb_track_changed_pages_validate( return 1; } - if (srv_track_changed_pages || enabled_on_startup) { - enabled_on_startup = true; + if (srv_redo_log_thread_started) { *reinterpret_cast(save) = static_cast(intbuf); return 0; diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index 77025c1637396..23692c92c09ca 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -1110,8 +1110,20 @@ buf_block_get_frame( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ __attribute__((pure)); + +/*********************************************************************//** +Gets a pointer to the memory frame of a block, where block is known not to be +NULL. 
+@return pointer to the frame */ +UNIV_INLINE +buf_frame_t* +buf_nonnull_block_get_frame( + const buf_block_t* block) /*!< in: pointer to the control block */ + __attribute__((pure)); + #else /* UNIV_DEBUG */ # define buf_block_get_frame(block) (block ? (block)->frame : 0) +# define buf_nonnull_block_get_frame(block) ((block)->frame) #endif /* UNIV_DEBUG */ /*********************************************************************//** Gets the space id of a block. diff --git a/storage/xtradb/include/buf0buf.ic b/storage/xtradb/include/buf0buf.ic index f214112c7ce96..fae44a1ac4a2b 100644 --- a/storage/xtradb/include/buf0buf.ic +++ b/storage/xtradb/include/buf0buf.ic @@ -718,6 +718,19 @@ buf_block_get_frame( { SRV_CORRUPT_TABLE_CHECK(block, return(0);); + return(buf_nonnull_block_get_frame(block)); +} + +/*********************************************************************//** +Gets a pointer to the memory frame of a block, where block is known not to be +NULL. +@return pointer to the frame */ +UNIV_INLINE +buf_frame_t* +buf_nonnull_block_get_frame( +/*========================*/ + const buf_block_t* block) /*!< in: pointer to the control block */ +{ switch (buf_block_get_state(block)) { case BUF_BLOCK_ZIP_FREE: case BUF_BLOCK_ZIP_PAGE: @@ -739,6 +752,7 @@ buf_block_get_frame( ok: return((buf_frame_t*) block->frame); } + #endif /* UNIV_DEBUG */ /*********************************************************************//** diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index 3ccad0640b663..12ab9d9ed876f 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -74,6 +74,11 @@ extern os_event_t srv_checkpoint_completed_event; that the (slow) shutdown may proceed */ extern os_event_t srv_redo_log_thread_finished_event; +/** Whether the redo log tracker thread has been started. 
Does not take into +account whether the tracking is currently enabled (see srv_track_changed_pages +for that) */ +extern my_bool srv_redo_log_thread_started; + /* If the last data file is auto-extended, we add this many pages to it at a time */ #define SRV_AUTO_EXTEND_INCREMENT \ @@ -141,6 +146,9 @@ extern char* srv_doublewrite_file; extern ibool srv_recovery_stats; +/** Whether the redo log tracking is currently enabled. Note that it is +possible for the log tracker thread to be running and the tracking to be +disabled */ extern my_bool srv_track_changed_pages; extern ib_uint64_t srv_max_bitmap_file_size; diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index b158a12027f9c..cc589166f8de7 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -64,7 +64,7 @@ component, i.e. we show M.N.P as M.N */ (INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR) #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 38.0 +#define PERCONA_INNODB_VERSION 38.3 #endif #define INNODB_VERSION_STR MYSQL_SERVER_VERSION diff --git a/storage/xtradb/log/log0log.c b/storage/xtradb/log/log0log.c index b4b48a065f985..49ee8407b2c09 100644 --- a/storage/xtradb/log/log0log.c +++ b/storage/xtradb/log/log0log.c @@ -3326,6 +3326,8 @@ logs_empty_and_mark_files_at_shutdown(void) algorithm only works if the server is idle at shutdown */ srv_shutdown_state = SRV_SHUTDOWN_CLEANUP; + + srv_wake_purge_thread(); loop: os_thread_sleep(100000); @@ -3499,7 +3501,7 @@ logs_empty_and_mark_files_at_shutdown(void) srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE; /* Wake the log tracking thread which will then immediatelly quit because of srv_shutdown_state value */ - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { os_event_set(srv_checkpoint_completed_event); os_event_wait(srv_redo_log_thread_finished_event); } @@ -3576,7 +3578,7 @@ logs_empty_and_mark_files_at_shutdown(void) srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE; /* Signal the log 
following thread to quit */ - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { os_event_set(srv_checkpoint_completed_event); } @@ -3600,7 +3602,7 @@ logs_empty_and_mark_files_at_shutdown(void) fil_flush_file_spaces(FIL_TABLESPACE); - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { os_event_wait(srv_redo_log_thread_finished_event); } diff --git a/storage/xtradb/log/log0online.c b/storage/xtradb/log/log0online.c index d0127488f6791..fa2c8b882bfd6 100644 --- a/storage/xtradb/log/log0online.c +++ b/storage/xtradb/log/log0online.c @@ -1813,7 +1813,7 @@ log_online_purge_changed_page_bitmaps( lsn = IB_ULONGLONG_MAX; } - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { /* User requests might happen with both enabled and disabled tracking */ mutex_enter(&log_bmp_sys->mutex); @@ -1821,13 +1821,13 @@ log_online_purge_changed_page_bitmaps( if (!log_online_setup_bitmap_file_range(&bitmap_files, 0, IB_ULONGLONG_MAX)) { - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { mutex_exit(&log_bmp_sys->mutex); } return TRUE; } - if (srv_track_changed_pages && lsn > log_bmp_sys->end_lsn) { + if (srv_redo_log_thread_started && lsn > log_bmp_sys->end_lsn) { /* If we have to delete the current output file, close it first. */ os_file_close(log_bmp_sys->out.file); @@ -1858,7 +1858,7 @@ log_online_purge_changed_page_bitmaps( } } - if (srv_track_changed_pages) { + if (srv_redo_log_thread_started) { if (lsn > log_bmp_sys->end_lsn) { ib_uint64_t new_file_lsn; if (lsn == IB_ULONGLONG_MAX) { @@ -1869,9 +1869,7 @@ log_online_purge_changed_page_bitmaps( new_file_lsn = log_bmp_sys->end_lsn; } if (!log_online_rotate_bitmap_file(new_file_lsn)) { - /* If file create failed, signal the log - tracking thread to quit next time it wakes - up. 
*/ + /* If file create failed, stop log tracking */ srv_track_changed_pages = FALSE; } } diff --git a/storage/xtradb/log/log0recv.c b/storage/xtradb/log/log0recv.c index 6c2a121967e6f..527e7b3af0f6e 100644 --- a/storage/xtradb/log/log0recv.c +++ b/storage/xtradb/log/log0recv.c @@ -3015,7 +3015,7 @@ recv_recovery_from_checkpoint_start_func( ib_uint64_t checkpoint_lsn; ib_uint64_t checkpoint_no; ib_uint64_t old_scanned_lsn; - ib_uint64_t group_scanned_lsn; + ib_uint64_t group_scanned_lsn = 0; ib_uint64_t contiguous_lsn; #ifdef UNIV_LOG_ARCHIVE ib_uint64_t archived_lsn; diff --git a/storage/xtradb/mach/mach0data.c b/storage/xtradb/mach/mach0data.c index 95b135b09541e..00378f036c957 100644 --- a/storage/xtradb/mach/mach0data.c +++ b/storage/xtradb/mach/mach0data.c @@ -56,7 +56,18 @@ mach_parse_compressed( *val = flag; return(ptr + 1); - } else if (flag < 0xC0UL) { + } + + /* Workaround GCC bug + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77673: + the compiler moves mach_read_from_4 right to the beginning of the + function, causing and out-of-bounds read if we are reading a short + integer close to the end of buffer. */ +#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__) + asm volatile("": : :"memory"); +#endif + + if (flag < 0xC0UL) { if (end_ptr < ptr + 2) { return(NULL); } diff --git a/storage/xtradb/srv/srv0srv.c b/storage/xtradb/srv/srv0srv.c index 26ad32cb1d1ed..0acce91f2c4b9 100644 --- a/storage/xtradb/srv/srv0srv.c +++ b/storage/xtradb/srv/srv0srv.c @@ -179,6 +179,9 @@ UNIV_INTERN char* srv_doublewrite_file = NULL; UNIV_INTERN ibool srv_recovery_stats = FALSE; +/** Whether the redo log tracking is currently enabled. 
Note that it is +possible for the log tracker thread to be running and the tracking to be +disabled */ UNIV_INTERN my_bool srv_track_changed_pages = FALSE; UNIV_INTERN ib_uint64_t srv_max_bitmap_file_size = 100 * 1024 * 1024; @@ -809,6 +812,11 @@ UNIV_INTERN os_event_t srv_checkpoint_completed_event; UNIV_INTERN os_event_t srv_redo_log_thread_finished_event; +/** Whether the redo log tracker thread has been started. Does not take into +account whether the tracking is currently enabled (see srv_track_changed_pages +for that) */ +UNIV_INTERN my_bool srv_redo_log_thread_started = FALSE; + UNIV_INTERN srv_sys_t* srv_sys = NULL; /* padding to prevent other memory update hotspots from residing on @@ -3179,18 +3187,15 @@ srv_redo_log_follow_thread( #endif my_thread_init(); + srv_redo_log_thread_started = TRUE; do { os_event_wait(srv_checkpoint_completed_event); os_event_reset(srv_checkpoint_completed_event); -#ifdef UNIV_DEBUG - if (!srv_track_changed_pages) { - continue; - } -#endif + if (srv_track_changed_pages + && srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) { - if (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE) { if (!log_online_follow_redo_log()) { /* TODO: sync with I_S log tracking status? */ fprintf(stderr, @@ -3206,6 +3211,7 @@ srv_redo_log_follow_thread( srv_track_changed_pages = FALSE; log_online_read_shutdown(); os_event_set(srv_redo_log_thread_finished_event); + srv_redo_log_thread_started = FALSE; /* Defensive, not required */ my_thread_end(); os_thread_exit(NULL); @@ -3327,7 +3333,7 @@ srv_master_do_purge(void) ut_ad(!mutex_own(&kernel_mutex)); - ut_a(srv_n_purge_threads == 0 || (srv_shutdown_state > 0 && srv_n_threads_active[SRV_WORKER] == 0)); + ut_a(srv_n_purge_threads == 0); do { /* Check for shutdown and change in purge config. 
*/ @@ -3848,7 +3854,7 @@ srv_master_thread( /* Flush logs if needed */ srv_sync_log_buffer_in_background(); - if (srv_n_purge_threads == 0 || (srv_shutdown_state > 0 && srv_n_threads_active[SRV_WORKER] == 0)) { + if (srv_n_purge_threads == 0) { srv_main_thread_op_info = "master purging"; srv_master_do_purge(); @@ -3926,7 +3932,7 @@ srv_master_thread( } } - if (srv_n_purge_threads == 0 || (srv_shutdown_state > 0 && srv_n_threads_active[SRV_WORKER] == 0)) { + if (srv_n_purge_threads == 0) { srv_main_thread_op_info = "master purging"; srv_master_do_purge(); @@ -4142,9 +4148,10 @@ srv_purge_thread( We peek at the history len without holding any mutex because in the worst case we will end up waiting for the next purge event. */ - if (trx_sys->rseg_history_len < srv_purge_batch_size - || (n_total_purged == 0 - && retries >= TRX_SYS_N_RSEGS)) { + if (srv_shutdown_state == SRV_SHUTDOWN_NONE + && (trx_sys->rseg_history_len < srv_purge_batch_size + || (n_total_purged == 0 + && retries >= TRX_SYS_N_RSEGS))) { mutex_enter(&kernel_mutex); @@ -4159,8 +4166,12 @@ srv_purge_thread( /* Check for shutdown and whether we should do purge at all. 
*/ if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND - || srv_shutdown_state != 0 - || srv_fast_shutdown) { + || (srv_shutdown_state != SRV_SHUTDOWN_NONE + && srv_fast_shutdown) + || (srv_shutdown_state != SRV_SHUTDOWN_NONE + && srv_fast_shutdown == 0 + && n_total_purged == 0 + && retries >= TRX_SYS_N_RSEGS)) { break; } @@ -4183,6 +4194,9 @@ srv_purge_thread( srv_sync_log_buffer_in_background(); + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) + continue; + cur_time = ut_time_ms(); if (next_itr_time > cur_time) { os_thread_sleep(ut_min(1000000, From 383007c75d6ef5043fa5781956a6a02b24e2b79e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 13 Oct 2016 21:35:01 +0200 Subject: [PATCH 08/44] mysql cli: fix USE command quoting * use proper sql quoting rules for USE, while preserving as much of historical behavior as possible * short commands (\u) behave as before --- client/mysql.cc | 56 +++++++++++++++++++++------------------ mysql-test/r/mysql.result | 8 ++++++ mysql-test/t/mysql.test | 8 ++++++ 3 files changed, 46 insertions(+), 26 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index 9d255b55430a1..9b1999f2c3849 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -245,7 +245,8 @@ static void end_pager(); static void init_tee(const char *); static void end_tee(); static const char* construct_prompt(); -static char *get_arg(char *line, my_bool get_next_arg); +enum get_arg_mode { CHECK, GET, GET_NEXT}; +static char *get_arg(char *line, get_arg_mode mode); static void init_username(); static void add_int_to_prompt(int toadd); static int get_result_width(MYSQL_RES *res); @@ -2223,7 +2224,7 @@ static COMMANDS *find_command(char *name) if (!my_strnncoll(&my_charset_latin1, (uchar*) name, len, (uchar*) commands[i].name, len) && (commands[i].name[len] == '\0') && - (!end || commands[i].takes_params)) + (!end || (commands[i].takes_params && get_arg(name, CHECK)))) { index= i; break; @@ -3143,7 +3144,7 @@ com_charset(String *buffer __attribute__((unused)), 
char *line) char buff[256], *param; CHARSET_INFO * new_cs; strmake_buf(buff, line); - param= get_arg(buff, 0); + param= get_arg(buff, GET); if (!param || !*param) { return put_info("Usage: \\C charset_name | charset charset_name", @@ -4228,12 +4229,12 @@ com_connect(String *buffer, char *line) #ifdef EXTRA_DEBUG tmp[1]= 0; #endif - tmp= get_arg(buff, 0); + tmp= get_arg(buff, GET); if (tmp && *tmp) { my_free(current_db); current_db= my_strdup(tmp, MYF(MY_WME)); - tmp= get_arg(buff, 1); + tmp= get_arg(buff, GET_NEXT); if (tmp) { my_free(current_host); @@ -4336,7 +4337,7 @@ com_delimiter(String *buffer __attribute__((unused)), char *line) char buff[256], *tmp; strmake_buf(buff, line); - tmp= get_arg(buff, 0); + tmp= get_arg(buff, GET); if (!tmp || !*tmp) { @@ -4367,7 +4368,7 @@ com_use(String *buffer __attribute__((unused)), char *line) bzero(buff, sizeof(buff)); strmake_buf(buff, line); - tmp= get_arg(buff, 0); + tmp= get_arg(buff, GET); if (!tmp || !*tmp) { put_info("USE must be followed by a database name", INFO_ERROR); @@ -4452,23 +4453,22 @@ com_nowarnings(String *buffer __attribute__((unused)), } /* - Gets argument from a command on the command line. If get_next_arg is - not defined, skips the command and returns the first argument. The - line is modified by adding zero to the end of the argument. If - get_next_arg is defined, then the function searches for end of string - first, after found, returns the next argument and adds zero to the - end. If you ever wish to use this feature, remember to initialize all - items in the array to zero first. + Gets argument from a command on the command line. If mode is not GET_NEXT, + skips the command and returns the first argument. The line is modified by + adding zero to the end of the argument. If mode is GET_NEXT, then the + function searches for end of string first, after found, returns the next + argument and adds zero to the end. 
If you ever wish to use this feature, + remember to initialize all items in the array to zero first. */ -char *get_arg(char *line, my_bool get_next_arg) +static char *get_arg(char *line, get_arg_mode mode) { char *ptr, *start; - my_bool quoted= 0, valid_arg= 0; + bool short_cmd= false; char qtype= 0; ptr= line; - if (get_next_arg) + if (mode == GET_NEXT) { for (; *ptr; ptr++) ; if (*(ptr + 1)) @@ -4479,7 +4479,7 @@ char *get_arg(char *line, my_bool get_next_arg) /* skip leading white spaces */ while (my_isspace(charset_info, *ptr)) ptr++; - if (*ptr == '\\') // short command was used + if ((short_cmd= *ptr == '\\')) // short command was used ptr+= 2; else while (*ptr &&!my_isspace(charset_info, *ptr)) // skip command @@ -4492,24 +4492,28 @@ char *get_arg(char *line, my_bool get_next_arg) if (*ptr == '\'' || *ptr == '\"' || *ptr == '`') { qtype= *ptr; - quoted= 1; ptr++; } for (start=ptr ; *ptr; ptr++) { - if (*ptr == '\\' && ptr[1]) // escaped character + if ((*ptr == '\\' && ptr[1]) || // escaped character + (!short_cmd && qtype && *ptr == qtype && ptr[1] == qtype)) // quote { - // Remove the backslash - strmov_overlapp(ptr, ptr+1); + // Remove (or skip) the backslash (or a second quote) + if (mode != CHECK) + strmov_overlapp(ptr, ptr+1); + else + ptr++; } - else if ((!quoted && *ptr == ' ') || (quoted && *ptr == qtype)) + else if (*ptr == (qtype ? qtype : ' ')) { - *ptr= 0; + qtype= 0; + if (mode != CHECK) + *ptr= 0; break; } } - valid_arg= ptr != start; - return valid_arg ? start : NullS; + return ptr != start && !qtype ? 
start : NullS; } diff --git a/mysql-test/r/mysql.result b/mysql-test/r/mysql.result index cb705d285fe3d..dd0129df0d9fe 100644 --- a/mysql-test/r/mysql.result +++ b/mysql-test/r/mysql.result @@ -512,6 +512,14 @@ DROP DATABASE connected_db; create database `aa``bb````cc`; DATABASE() aa`bb``cc +DATABASE() +test +DATABASE() +aa`bb``cc +DATABASE() +test +DATABASE() +aa`bb``cc drop database `aa``bb````cc`; a >>\ndelimiter\n<< diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test index 6281bb5f4c1b7..d59083d66b0c7 100644 --- a/mysql-test/t/mysql.test +++ b/mysql-test/t/mysql.test @@ -586,8 +586,16 @@ DROP DATABASE connected_db; # USE and names with backticks # --write_file $MYSQLTEST_VARDIR/tmp/backticks.sql +\u aa`bb``cc +SELECT DATABASE(); +USE test +SELECT DATABASE(); USE aa`bb``cc SELECT DATABASE(); +USE test +SELECT DATABASE(); +USE `aa``bb````cc` +SELECT DATABASE(); EOF create database `aa``bb````cc`; --exec $MYSQL < $MYSQLTEST_VARDIR/tmp/backticks.sql From 01b39b7b0730102b88d8ea43ec719a75e9316a1e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 13 Oct 2016 20:58:08 +0200 Subject: [PATCH 09/44] mysqltest: don't eat new lines in --exec pass them through as is --- client/mysqltest.cc | 4 ---- mysql-test/r/mysql_not_windows.result | 6 ++++++ mysql-test/r/mysqltest.result | 6 ------ mysql-test/t/mysql_not_windows.test | 9 +++++++++ mysql-test/t/mysqltest.test | 9 --------- 5 files changed, 15 insertions(+), 19 deletions(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 3652d1a40e2be..acb9e8b1e0c06 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -3349,10 +3349,6 @@ void do_exec(struct st_command *command) #endif #endif - /* exec command is interpreted externally and will not take newlines */ - while(replace(&ds_cmd, "\n", 1, " ", 1) == 0) - ; - DBUG_PRINT("info", ("Executing '%s' as '%s'", command->first_argument, ds_cmd.str)); diff --git a/mysql-test/r/mysql_not_windows.result b/mysql-test/r/mysql_not_windows.result index 
d5670a1a9ca38..1df62d9a12dcf 100644 --- a/mysql-test/r/mysql_not_windows.result +++ b/mysql-test/r/mysql_not_windows.result @@ -3,3 +3,9 @@ a 1 End of tests +1 +1 +2 +2 +X +3 diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index 865c8d7077b96..0ebef585974bc 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -269,12 +269,6 @@ source database echo message echo message mysqltest: At line 1: Missing argument in exec -1 -1 -2 -2 -X -3 MySQL "MySQL" MySQL: The diff --git a/mysql-test/t/mysql_not_windows.test b/mysql-test/t/mysql_not_windows.test index 66853677f7bb6..591de74cbbf47 100644 --- a/mysql-test/t/mysql_not_windows.test +++ b/mysql-test/t/mysql_not_windows.test @@ -13,3 +13,12 @@ --echo --echo End of tests + +# Multi-line exec +exec $MYSQL \ + test -e "select 1"; +exec $MYSQL test -e "select + 2"; +let $query = select 3 + as X; +exec $MYSQL test -e "$query"; diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index ffbec36873ef5..6470ede4f14c0 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -741,15 +741,6 @@ echo ; --error 1 --exec echo "--exec " | $MYSQL_TEST 2>&1 -# Multi-line exec -exec $MYSQL - test -e "select 1"; -exec $MYSQL test -e "select - 2"; -let $query = select 3 - as X; -exec $MYSQL test -e "$query"; - # ---------------------------------------------------------------------------- # Test let command # ---------------------------------------------------------------------------- From 5a43a31ee81bc181eeb5ef2bf0704befa6e0594d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 14 Oct 2016 00:33:49 +0200 Subject: [PATCH 10/44] mysqldump: comments and identifiers with new lines don't let identifiers with new lines to break a comment --- client/mysqldump.c | 60 ++++++++++----- mysql-test/r/mysqldump-nl.result | 126 +++++++++++++++++++++++++++++++ mysql-test/t/mysqldump-nl.test | 38 ++++++++++ 3 files changed, 207 insertions(+), 17 deletions(-) 
create mode 100644 mysql-test/r/mysqldump-nl.result create mode 100644 mysql-test/t/mysqldump-nl.test diff --git a/client/mysqldump.c b/client/mysqldump.c index 16b39b77cf15f..32c350d307869 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -547,9 +547,7 @@ static int dump_all_tablespaces(); static int dump_tablespaces_for_tables(char *db, char **table_names, int tables); static int dump_tablespaces_for_databases(char** databases); static int dump_tablespaces(char* ts_where); -static void print_comment(FILE *sql_file, my_bool is_error, const char *format, - ...); - +static void print_comment(FILE *, my_bool, const char *, ...); /* Print the supplied message if in verbose mode @@ -627,6 +625,30 @@ static void short_usage(FILE *f) } +/** returns a string fixed to be safely printed inside a -- comment + + that is, any new line in it gets prefixed with -- +*/ +static const char *fix_for_comment(const char *ident) +{ + static char buf[1024]; + char c, *s= buf; + + while ((c= *s++= *ident++)) + { + if (s >= buf + sizeof(buf) - 10) + { + strmov(s, "..."); + break; + } + if (c == '\n') + s= strmov(s, "-- "); + } + + return buf; +} + + static void write_header(FILE *sql_file, char *db_name) { if (opt_xml) @@ -649,8 +671,8 @@ static void write_header(FILE *sql_file, char *db_name) DUMP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); print_comment(sql_file, 0, "-- Host: %s Database: %s\n", - current_host ? current_host : "localhost", - db_name ? db_name : ""); + fix_for_comment(current_host ? current_host : "localhost"), + fix_for_comment(db_name ? 
db_name : "")); print_comment(sql_file, 0, "-- ------------------------------------------------------\n" ); @@ -2094,7 +2116,8 @@ static uint dump_events_for_db(char *db) /* nice comments */ print_comment(sql_file, 0, - "\n--\n-- Dumping events for database '%s'\n--\n", db); + "\n--\n-- Dumping events for database '%s'\n--\n", + fix_for_comment(db)); /* not using "mysql_query_with_error_report" because we may have not @@ -2307,7 +2330,8 @@ static uint dump_routines_for_db(char *db) /* nice comments */ print_comment(sql_file, 0, - "\n--\n-- Dumping routines for database '%s'\n--\n", db); + "\n--\n-- Dumping routines for database '%s'\n--\n", + fix_for_comment(db)); /* not using "mysql_query_with_error_report" because we may have not @@ -2580,11 +2604,11 @@ static uint get_table_structure(char *table, char *db, char *table_type, if (strcmp (table_type, "VIEW") == 0) /* view */ print_comment(sql_file, 0, "\n--\n-- Temporary table structure for view %s\n--\n\n", - result_table); + fix_for_comment(result_table)); else print_comment(sql_file, 0, "\n--\n-- Table structure for table %s\n--\n\n", - result_table); + fix_for_comment(result_table)); if (opt_drop) { @@ -2826,7 +2850,7 @@ static uint get_table_structure(char *table, char *db, char *table_type, print_comment(sql_file, 0, "\n--\n-- Table structure for table %s\n--\n\n", - result_table); + fix_for_comment(result_table)); if (opt_drop) fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", result_table); if (!opt_xml) @@ -3530,21 +3554,21 @@ static void dump_table(char *table, char *db) { print_comment(md_result_file, 0, "\n--\n-- Dumping data for table %s\n--\n", - result_table); + fix_for_comment(result_table)); dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * FROM "); dynstr_append_checked(&query_string, result_table); if (where) { - print_comment(md_result_file, 0, "-- WHERE: %s\n", where); + print_comment(md_result_file, 0, "-- WHERE: %s\n", fix_for_comment(where)); 
dynstr_append_checked(&query_string, " WHERE "); dynstr_append_checked(&query_string, where); } if (order_by) { - print_comment(md_result_file, 0, "-- ORDER BY: %s\n", order_by); + print_comment(md_result_file, 0, "-- ORDER BY: %s\n", fix_for_comment(order_by)); dynstr_append_checked(&query_string, " ORDER BY "); dynstr_append_checked(&query_string, order_by); @@ -4053,7 +4077,7 @@ static int dump_tablespaces(char* ts_where) if (first) { print_comment(md_result_file, 0, "\n--\n-- Logfile group: %s\n--\n", - row[0]); + fix_for_comment(row[0])); fprintf(md_result_file, "\nCREATE"); } @@ -4122,7 +4146,8 @@ static int dump_tablespaces(char* ts_where) first= 1; if (first) { - print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n", row[0]); + print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n", + fix_for_comment(row[0])); fprintf(md_result_file, "\nCREATE"); } else @@ -4326,7 +4351,8 @@ static int init_dumping(char *database, int init_func(char*)) char *qdatabase= quote_name(database,quoted_database_buf,opt_quoted); print_comment(md_result_file, 0, - "\n--\n-- Current Database: %s\n--\n", qdatabase); + "\n--\n-- Current Database: %s\n--\n", + fix_for_comment(qdatabase)); /* Call the view or table specific function */ init_func(qdatabase); @@ -5356,7 +5382,7 @@ static my_bool get_view_structure(char *table, char* db) print_comment(sql_file, 0, "\n--\n-- Final view structure for view %s\n--\n\n", - result_table); + fix_for_comment(result_table)); /* Table might not exist if this view was dumped with --tab. 
*/ fprintf(sql_file, "/*!50001 DROP TABLE IF EXISTS %s*/;\n", opt_quoted_table); diff --git a/mysql-test/r/mysqldump-nl.result b/mysql-test/r/mysqldump-nl.result new file mode 100644 index 0000000000000..6de439bdf3c61 --- /dev/null +++ b/mysql-test/r/mysqldump-nl.result @@ -0,0 +1,126 @@ +create database `mysqltest1 +1tsetlqsym`; +use `mysqltest1 +1tsetlqsym`; +create table `t1 +1t` (`foobar +raboof` int); +create view `v1 +1v` as select * from `t1 +1t`; +create procedure sp() select * from `v1 +1v`; +flush tables; +use test; + +-- +-- Current Database: `mysqltest1 +-- 1tsetlqsym` +-- + +/*!40000 DROP DATABASE IF EXISTS `mysqltest1 +1tsetlqsym`*/; + +CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1 +1tsetlqsym` /*!40100 DEFAULT CHARACTER SET latin1 */; + +USE `mysqltest1 +1tsetlqsym`; + +-- +-- Table structure for table `t1 +-- 1t` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t1 +1t` ( + `foobar +raboof` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `t1 +-- 1t` +-- + +-- +-- Temporary table structure for view `v1 +-- 1v` +-- + +SET @saved_cs_client = @@character_set_client; +SET character_set_client = utf8; +/*!50001 CREATE TABLE `v1 +1v` ( + `foobar +raboof` tinyint NOT NULL +) ENGINE=MyISAM */; +SET character_set_client = @saved_cs_client; + +-- +-- Dumping routines for database 'mysqltest1 +-- 1tsetlqsym' +-- +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = latin1 */ ; +/*!50003 SET character_set_results = latin1 */ ; +/*!50003 SET collation_connection = latin1_swedish_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +CREATE 
DEFINER=`root`@`localhost` PROCEDURE `sp`() +select * from `v1 +1v` ;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = @saved_col_connection */ ; + +-- +-- Current Database: `mysqltest1 +-- 1tsetlqsym` +-- + +USE `mysqltest1 +1tsetlqsym`; + +-- +-- Final view structure for view `v1 +-- 1v` +-- + +/*!50001 DROP TABLE IF EXISTS `v1 +1v`*/; +/*!50001 SET @saved_cs_client = @@character_set_client */; +/*!50001 SET @saved_cs_results = @@character_set_results */; +/*!50001 SET @saved_col_connection = @@collation_connection */; +/*!50001 SET character_set_client = latin1 */; +/*!50001 SET character_set_results = latin1 */; +/*!50001 SET collation_connection = latin1_swedish_ci */; +/*!50001 CREATE ALGORITHM=UNDEFINED */ +/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */ +/*!50001 VIEW `v1 +1v` AS select `t1 +1t`.`foobar +raboof` AS `foobar +raboof` from `t1 +1t` */; +/*!50001 SET character_set_client = @saved_cs_client */; +/*!50001 SET character_set_results = @saved_cs_results */; +/*!50001 SET collation_connection = @saved_col_connection */; +show tables from `mysqltest1 +1tsetlqsym`; +Tables_in_mysqltest1 +1tsetlqsym +t1 +1t +v1 +1v +drop database `mysqltest1 +1tsetlqsym`; diff --git a/mysql-test/t/mysqldump-nl.test b/mysql-test/t/mysqldump-nl.test new file mode 100644 index 0000000000000..311996e77c305 --- /dev/null +++ b/mysql-test/t/mysqldump-nl.test @@ -0,0 +1,38 @@ +# +# New lines in identifiers +# + +# embedded server doesn't support external clients +--source include/not_embedded.inc +# cmd.exe doesn't like new lines on the command line +--source include/not_windows.inc + +create database `mysqltest1 +1tsetlqsym`; +use `mysqltest1 +1tsetlqsym`; + +create table `t1 +1t` (`foobar +raboof` int); +create view `v1 +1v` as select * from `t1 +1t`; + +create procedure sp() select * from `v1 
+1v`; + +flush tables; +use test; + +exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1 +1tsetlqsym'; + +exec $MYSQL_DUMP --compact --comment --routines --add-drop-database --databases 'mysqltest1 +1tsetlqsym' | $MYSQL; + +show tables from `mysqltest1 +1tsetlqsym`; + +drop database `mysqltest1 +1tsetlqsym`; From eac8d95ffcdea7cd31d60d273e30cb3dfec66add Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 14 Oct 2016 12:51:53 +0200 Subject: [PATCH 11/44] compilation warning after xtradb merge --- storage/xtradb/handler/ha_innodb.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 386920e689d1b..66fcc2799bb94 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -481,7 +481,7 @@ int innobase_get_parent_fk_list( THD* thd, const char* path, - List* f_key_list); + List* f_key_list) __attribute__((unused)); /******************************************************************//** Maps a MySQL trx isolation level code to the InnoDB isolation level code From b7aee7dbe71cf77199e28e905469f0d9fb6d4a80 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 14 Oct 2016 18:29:33 +0200 Subject: [PATCH 12/44] - Fix MDEV-10950. Null values not retrieved for numeric types. Now the null is tested using the result set getObject method. 
modified: storage/connect/JdbcInterface.java modified: storage/connect/jdbconn.cpp modified: storage/connect/jdbconn.h --- storage/connect/JdbcInterface.java | 4 +-- storage/connect/jdbconn.cpp | 39 ++++++++++++++++++++---------- storage/connect/jdbconn.h | 1 + 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java index e339c9891133a..34af8c4e013c2 100644 --- a/storage/connect/JdbcInterface.java +++ b/storage/connect/JdbcInterface.java @@ -692,11 +692,11 @@ public int TimestampField(int n, String name) { return 0; } // end of TimestampField - public String ObjectField(int n, String name) { + public Object ObjectField(int n, String name) { if (rs == null) { System.out.println("No result set"); } else try { - return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString(); + return (n > 0) ? rs.getObject(n) : rs.getObject(name); } catch (SQLException se) { SetErrmsg(se); } //end try/catch diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 229ade53ad1cf..dca9bd0eac4e5 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -512,7 +512,7 @@ JDBConn::JDBConn(PGLOBAL g, TDBJDBC *tdbp) xqid = xuid = xid = grs = readid = fetchid = typid = errid = nullptr; prepid = xpid = pcid = nullptr; chrfldid = intfldid = dblfldid = fltfldid = bigfldid = nullptr; - datfldid = timfldid = tspfldid = nullptr; + objfldid = datfldid = timfldid = tspfldid = nullptr; //m_LoginTimeout = DEFAULT_LOGIN_TIMEOUT; //m_QueryTimeout = DEFAULT_QUERY_TIMEOUT; //m_UpdateOptions = 0; @@ -1167,9 +1167,10 @@ void JDBConn::Close() /***********************************************************************/ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) { - PGLOBAL& g = m_G; - jint ctyp; - jstring cn, jn = nullptr; + PGLOBAL& g = m_G; + jint ctyp; + jstring cn, jn = nullptr; + jobject jb = nullptr; if (rank == 0) if (!name || (jn = env->NewStringUTF(name)) == 
nullptr) { @@ -1185,21 +1186,32 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); } // endif Check + if (val->GetNullable()) + if (!gmID(g, objfldid, "ObjectField", "(ILjava/lang/String;)Ljava/lang/Object;")) { + jb = env->CallObjectMethod(job, objfldid, (jint)rank, jn); + + if (jb == nullptr) { + val->Reset(); + val->SetNull(true); + goto chk; + } // endif job + + } // endif objfldid + switch (ctyp) { case 12: // VARCHAR case -1: // LONGVARCHAR case 1: // CHAR - if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) { + if (jb) + cn = (jstring)jb; + else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) cn = (jstring)env->CallObjectMethod(job, chrfldid, (jint)rank, jn); + else + cn = nullptr; - if (cn) { - const char *field = env->GetStringUTFChars(cn, (jboolean)false); - val->SetValue_psz((PSZ)field); - } else { - val->Reset(); - val->SetNull(true); - } // endif cn - + if (cn) { + const char *field = env->GetStringUTFChars(cn, (jboolean)false); + val->SetValue_psz((PSZ)field); } else val->Reset(); @@ -1271,6 +1283,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) val->Reset(); } // endswitch Type + chk: if (Check()) { if (rank == 0) env->DeleteLocalRef(jn); diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h index 095b1565bd276..0a1c52d4576d1 100644 --- a/storage/connect/jdbconn.h +++ b/storage/connect/jdbconn.h @@ -165,6 +165,7 @@ class JDBConn : public BLOCK { jmethodID xpid; // The ExecutePrep method ID jmethodID pcid; // The ClosePrepStmt method ID jmethodID errid; // The GetErrmsg method ID + jmethodID objfldid; // The ObjectField method ID jmethodID chrfldid; // The StringField method ID jmethodID intfldid; // The IntField method ID jmethodID dblfldid; // The DoubleField method ID From f6d4f82d6e49ed1ca2155c9e0e12f3dd8fcb1acf Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Fri, 14 Oct 2016 23:23:16 +0300 
Subject: [PATCH 13/44] MDEV-11061 Valgrind builder produces endless warnings after switching to OpenSS --- mysql-test/valgrind.supp | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 723b609de8f41..2dd10ff4008a2 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -1228,3 +1228,25 @@ fun:dlopen@@GLIBC_2.2.5 } +{ + MDEV-11061: OpenSSL 0.9.8 - Conditional jump or move + Memcheck:Cond + fun:BN_* + ... + fun:ssl3_ctx_ctrl + fun:new_VioSSLFd + fun:new_VioSSLAcceptorFd + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 - Use of uninitialised value + Memcheck:Value8 + fun:BN_* + ... + fun:ssl3_ctx_ctrl + fun:new_VioSSLFd + fun:new_VioSSLAcceptorFd + ... +} + From 8a49e00f3f1a81b6645ac3f2d843c9e5dd0375ba Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Fri, 14 Oct 2016 23:23:49 +0300 Subject: [PATCH 14/44] More unstable tests --- mysql-test/unstable-tests | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 6a46602eb0795..2dbaeeebc0bd6 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -56,6 +56,7 @@ main.named_pipe : Modified on 2016-08-02 (MDEV-10383) main.openssl_1 : Modified on 2016-07-11 (MDEV-10211) main.parser : Modified on 2016-06-21 (merge) main.pool_of_threads : MDEV-10100 - sporadic error on detecting max connections +main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count main.ps_1general : Modified on 2016-07-12 (merge) main.range : Modified on 2016-08-10 (merge) main.range_mrr_icp : Modified on 2016-08-10 (merge) @@ -116,6 +117,7 @@ innodb.innodb_corrupt_bit : Modified on 2016-06-21 (merge) innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan innodb.innodb-fk-warnings : Modified on 2016-07-18 (MDEV-8569) innodb.innodb-fkcheck : Modified on 2016-06-13 (MDEV-10083) +innodb.innodb_monitor : MDEV-10939 - Testcase timeout innodb.innodb-wl5522 : rdiff file modified on 
2016-08-10 (merge) innodb.innodb-wl5522-debug-zip : MDEV-10427 - Warning: database page corruption @@ -145,6 +147,7 @@ parts.partition_int_myisam : MDEV-10621 - Testcase timeout perfschema.digest_table_full : Modified on 2016-06-21 (merge) perfschema.func_file_io : MDEV-5708 - fails for s390x perfschema.func_mutex : MDEV-5708 - fails for s390x +perfschema.hostcache_ipv6_ssl : MDEV-10696 - crash on shutdown perfschema.rpl_gtid_func : Modified on 2016-06-21 (merge) perfschema.sizing_low : Modified on 2016-04-26 (5.6.30 merge) perfschema.socket_summary_by_event_name_func : MDEV-10622 - Socket summary tables do not match From 4192c468675220e0ad2de9eb722cfa457c0e5ced Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Sun, 16 Oct 2016 04:46:39 +0300 Subject: [PATCH 15/44] MDEV-11061 Valgrind builder produces endless warnings OpenSSL problems, part II --- mysql-test/valgrind.supp | 117 +++++++++++++++++++++++++++++++++++---- 1 file changed, 107 insertions(+), 10 deletions(-) diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 2dd10ff4008a2..77f17cf07ec0c 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -1228,25 +1228,122 @@ fun:dlopen@@GLIBC_2.2.5 } +# +# MDEV-11061: OpenSSL 0.9.8 problems +# + { - MDEV-11061: OpenSSL 0.9.8 - Conditional jump or move + MDEV-11061: OpenSSL 0.9.8 Memcheck:Cond - fun:BN_* + obj:*/libz.so* + ... + obj:*/libcrypto.so.0.9.8 ... - fun:ssl3_ctx_ctrl - fun:new_VioSSLFd - fun:new_VioSSLAcceptorFd + obj:*/libssl.so.0.9.8 ... } { - MDEV-11061: OpenSSL 0.9.8 - Use of uninitialised value + MDEV-11061: OpenSSL 0.9.8 Memcheck:Value8 - fun:BN_* + obj:*/libz.so* + ... + obj:*/libcrypto.so.0.9.8 + ... + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Cond + obj:*/libcrypto.so.0.9.8 + ... + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Value8 + obj:*/libcrypto.so.0.9.8 + ... + obj:*/libssl.so.0.9.8 + ... 
+} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Cond + obj:*/libssl.so.0.9.8 + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Value8 + obj:*/libssl.so.0.9.8 + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Cond + fun:memcpy + obj:*/libcrypto.so.0.9.8 + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Value8 + fun:memcpy + obj:*/libcrypto.so.0.9.8 + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Cond + fun:is_overlap + fun:memcpy + obj:*/libcrypto.so.0.9.8 + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Cond + fun:memset + obj:*/libcrypto.so.0.9.8 + ... + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Value8 + fun:memset + obj:*/libcrypto.so.0.9.8 + ... + obj:*/libssl.so.0.9.8 + ... +} + +{ + MDEV-11061: OpenSSL 0.9.8 + Memcheck:Param + write(buf) + obj:*/libpthread-2.9.so* + obj:*/libcrypto.so.0.9.8 ... - fun:ssl3_ctx_ctrl - fun:new_VioSSLFd - fun:new_VioSSLAcceptorFd + obj:*/libssl.so.0.9.8 ... 
} From df87be5edafb402e36e9c16aa0f00b1d5104d920 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Mon, 17 Oct 2016 14:04:45 +0300 Subject: [PATCH 16/44] MDEV-11069 main.information_schema test fails if hostname includes 'user' Patch provided by Honza Horak --- mysql-test/r/information_schema.result | 8 ++++---- mysql-test/t/information_schema.test | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index d98f8168e9e03..1f765a70137ca 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -986,19 +986,19 @@ show grants; Grants for user3@localhost GRANT USAGE ON *.* TO 'user3'@'localhost' GRANT SELECT ON `mysqltest`.* TO 'user3'@'localhost' -select * from information_schema.column_privileges where grantee like '%user%' +select * from information_schema.column_privileges where grantee like '\'user%' order by grantee; GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE 'user1'@'localhost' def mysqltest t1 f1 SELECT NO -select * from information_schema.table_privileges where grantee like '%user%' +select * from information_schema.table_privileges where grantee like '\'user%' order by grantee; GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PRIVILEGE_TYPE IS_GRANTABLE 'user2'@'localhost' def mysqltest t2 SELECT NO -select * from information_schema.schema_privileges where grantee like '%user%' +select * from information_schema.schema_privileges where grantee like '\'user%' order by grantee; GRANTEE TABLE_CATALOG TABLE_SCHEMA PRIVILEGE_TYPE IS_GRANTABLE 'user3'@'localhost' def mysqltest SELECT NO -select * from information_schema.user_privileges where grantee like '%user%' +select * from information_schema.user_privileges where grantee like '\'user%' order by grantee; GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE 'user1'@'localhost' def USAGE NO diff --git 
a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index fb39f2e5d580b..943eb8bab8ab6 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -612,13 +612,13 @@ select * from information_schema.schema_privileges order by grantee; select * from information_schema.user_privileges order by grantee; show grants; connection con4; -select * from information_schema.column_privileges where grantee like '%user%' +select * from information_schema.column_privileges where grantee like '\'user%' order by grantee; -select * from information_schema.table_privileges where grantee like '%user%' +select * from information_schema.table_privileges where grantee like '\'user%' order by grantee; -select * from information_schema.schema_privileges where grantee like '%user%' +select * from information_schema.schema_privileges where grantee like '\'user%' order by grantee; -select * from information_schema.user_privileges where grantee like '%user%' +select * from information_schema.user_privileges where grantee like '\'user%' order by grantee; show grants; connection default; From 6e257274d98843b228e5bd08da74031f6f3a202d Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Mon, 17 Oct 2016 11:43:47 -0400 Subject: [PATCH 17/44] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d44c8b2800612..4f1ecb3a1972b 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=5 MYSQL_VERSION_MINOR=5 -MYSQL_VERSION_PATCH=53 +MYSQL_VERSION_PATCH=54 MYSQL_VERSION_EXTRA= From 4dfb6a3f54cfb26535636197cc5fa70fe5bacc2e Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 28 Sep 2016 14:16:38 +0000 Subject: [PATCH 18/44] MDEV-11083 performance schema test fail with threadpool Fix PSI idle and socket instrumentation in threadpool --- sql/threadpool_common.cc | 99 ++++++++++++++++++++++++++-------------- 1 file changed, 66 insertions(+), 33 deletions(-) 
diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 5bcea767aae06..9d263038bc95f 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -73,17 +73,16 @@ struct Worker_thread_context void save() { -#ifdef HAVE_PSI_INTERFACE - psi_thread= PSI_server?PSI_server->get_thread():0; +#ifdef HAVE_PSI_THREAD_INTERFACE + psi_thread = PSI_THREAD_CALL(get_thread)(); #endif mysys_var= (st_my_thread_var *)pthread_getspecific(THR_KEY_mysys); } void restore() { -#ifdef HAVE_PSI_INTERFACE - if (PSI_server) - PSI_server->set_thread(psi_thread); +#ifdef HAVE_PSI_THREAD_INTERFACE + PSI_THREAD_CALL(set_thread)(psi_thread); #endif pthread_setspecific(THR_KEY_mysys,mysys_var); pthread_setspecific(THR_THD, 0); @@ -92,6 +91,41 @@ struct Worker_thread_context }; +#ifdef HAVE_PSI_INTERFACE + +/* + The following fixes PSI "idle" psi instrumentation. + The server assumes that connection becomes idle + just before net_read_packet() and switches to active after it. + In our setup, server becomes idle when async socket io is made. 
+*/ + +extern void net_before_header_psi(struct st_net *net, void *user_data, size_t); + +static void dummy_before_header(struct st_net *, void *, size_t) +{ +} + +static void re_init_net_server_extension(THD *thd) +{ + thd->m_net_server_extension.m_before_header = dummy_before_header; +} + +#else + +#define re_init_net_server_extension(thd) + +#endif /* HAVE_PSI_INTERFACE */ + + +static inline void set_thd_idle(THD *thd) +{ + thd->net.reading_or_writing= 1; +#ifdef HAVE_PSI_INTERFACE + net_before_header_psi(&thd->net, thd, 0); +#endif +} + /* Attach/associate the connection with the OS thread, */ @@ -100,10 +134,10 @@ static bool thread_attach(THD* thd) pthread_setspecific(THR_KEY_mysys,thd->mysys_var); thd->thread_stack=(char*)&thd; thd->store_globals(); -#ifdef HAVE_PSI_INTERFACE - if (PSI_server) - PSI_server->set_thread(thd->event_scheduler.m_psi); +#ifdef HAVE_PSI_THREAD_INTERFACE + PSI_THREAD_CALL(set_thread)(thd->event_scheduler.m_psi); #endif + mysql_socket_set_thread_owner(thd->net.vio->mysql_socket); return 0; } @@ -130,40 +164,38 @@ int threadpool_add_connection(THD *thd) } /* Create new PSI thread for use with the THD. */ -#ifdef HAVE_PSI_INTERFACE - if (PSI_server) - { - thd->event_scheduler.m_psi = - PSI_server->new_thread(key_thread_one_connection, thd, thd->thread_id); - } +#ifdef HAVE_PSI_THREAD_INTERFACE + thd->event_scheduler.m_psi= + PSI_THREAD_CALL(new_thread)(key_thread_one_connection, thd, thd->thread_id); #endif /* Login. */ thread_attach(thd); + re_init_net_server_extension(thd); ulonglong now= microsecond_interval_timer(); thd->prior_thr_create_utime= now; thd->start_utime= now; thd->thr_create_utime= now; - if (!setup_connection_thread_globals(thd)) - { - if (!login_connection(thd)) - { - prepare_new_connection_state(thd); - - /* - Check if THD is ok, as prepare_new_connection_state() - can fail, for example if init command failed. 
- */ - if (thd_is_connection_alive(thd)) - { - retval= 0; - thd->net.reading_or_writing= 1; - thd->skip_wait_timeout= true; - } - } - } + if (setup_connection_thread_globals(thd)) + goto end; + + if (thd_prepare_connection(thd)) + goto end; + + /* + Check if THD is ok, as prepare_new_connection_state() + can fail, for example if init command failed. + */ + if (!thd_is_connection_alive(thd)) + goto end; + + retval= 0; + thd->skip_wait_timeout= true; + set_thd_idle(thd); + +end: worker_context.restore(); return retval; } @@ -245,12 +277,13 @@ int threadpool_process_request(THD *thd) goto end; } + set_thd_idle(thd); + vio= thd->net.vio; if (!vio->has_data(vio)) { /* More info on this debug sync is in sql_parse.cc*/ DEBUG_SYNC(thd, "before_do_command_net_read"); - thd->net.reading_or_writing= 1; goto end; } } From 998f987eda62e6b3481ac3914538282715e2df4a Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Fri, 21 Oct 2016 22:37:51 +0200 Subject: [PATCH 19/44] Upstream MIPS test fixes from Debian Bug 838557. https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=838557 MIPS has a different errno for "directory not empty". --- mysql-test/extra/binlog_tests/database.test | 2 +- mysql-test/suite/rpl/t/rpl_drop_db.test | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql-test/extra/binlog_tests/database.test b/mysql-test/extra/binlog_tests/database.test index f111a028642e4..17f8e069fa3ce 100644 --- a/mysql-test/extra/binlog_tests/database.test +++ b/mysql-test/extra/binlog_tests/database.test @@ -52,7 +52,7 @@ eval SELECT 'hello' INTO OUTFILE 'fake_file.$prefix'; # Use '/' instead of '\' in the error message. On windows platform, dir is # formed with '\'. 
---replace_regex /\\testing_1\\*/\/testing_1\// /66/39/ /17/39/ /247/39/ /File exists/Directory not empty/ +--replace_regex /\\testing_1\\*/\/testing_1\// /66/39/ /93/39/ /17/39/ /247/39/ /File exists/Directory not empty/ --error 1010 DROP DATABASE testing_1; let $wait_binlog_event= DROP TABLE IF EXIST; diff --git a/mysql-test/suite/rpl/t/rpl_drop_db.test b/mysql-test/suite/rpl/t/rpl_drop_db.test index dae1651dc93ae..f66187b12f515 100644 --- a/mysql-test/suite/rpl/t/rpl_drop_db.test +++ b/mysql-test/suite/rpl/t/rpl_drop_db.test @@ -13,7 +13,7 @@ insert into mysqltest1.t1 values (1); select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt'; create table mysqltest1.t2 (n int); create table mysqltest1.t3 (n int); ---replace_result \\ / 66 39 17 39 247 39 "File exists" "Directory not empty" +--replace_result \\ / 66 39 93 39 17 39 247 39 "File exists" "Directory not empty" --error 1010 drop database mysqltest1; use mysqltest1; @@ -30,7 +30,7 @@ while ($1) } --enable_query_log ---replace_result \\ / 66 39 17 39 247 39 "File exists" "Directory not empty" +--replace_result \\ / 66 39 93 39 17 39 247 39 "File exists" "Directory not empty" --error 1010 drop database mysqltest1; use mysqltest1; From 7eb4bd3f1ddd9b84425d51550b44c14ac0a8f1de Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Fri, 21 Oct 2016 22:43:46 +0200 Subject: [PATCH 20/44] Upstream patch from Debian Bug 838557 The patch fixes 128-bit multiply on mips64. This corrects a previous incorrect patch upstreamed from Debian. 
--- extra/yassl/taocrypt/src/integer.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/extra/yassl/taocrypt/src/integer.cpp b/extra/yassl/taocrypt/src/integer.cpp index fb8d9276bd9f8..dd8425396eda2 100644 --- a/extra/yassl/taocrypt/src/integer.cpp +++ b/extra/yassl/taocrypt/src/integer.cpp @@ -193,8 +193,9 @@ DWord() {} "a" (a), "rm" (b) : "cc"); #elif defined(__mips64) - __asm__("dmultu %2,%3" : "=d" (r.halfs_.high), "=l" (r.halfs_.low) - : "r" (a), "r" (b)); + unsigned __int128 t = (unsigned __int128) a * b; + r.halfs_.high = t >> 64; + r.halfs_.low = (word) t; #elif defined(_M_IX86) // for testing From 39b7affcb13f9f508242e90ecd5db03b3bb3cb85 Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Fri, 21 Oct 2016 23:02:56 +0200 Subject: [PATCH 21/44] Upstream MIPS 32bit-build-on-64bit patch from Debian Bug#838914 From https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=838914 Fixes CMake so that when building a 32-bit mips binary on a 64-bit mips machine, the target is not set as 32-bit, which apparently confused some tests in mroonga. --- cmake/package_name.cmake | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmake/package_name.cmake b/cmake/package_name.cmake index 4ba8fc18e3f4c..48ca3a4814dd1 100644 --- a/cmake/package_name.cmake +++ b/cmake/package_name.cmake @@ -30,6 +30,10 @@ IF(NOT VERSION) SET(64BIT 1) ENDIF() + IF(NOT 64BIT AND CMAKE_SYSTEM_PROCESSOR MATCHES "^mips64") + SET(DEFAULT_MACHINE "mips") + ENDIF() + IF(CMAKE_SYSTEM_NAME MATCHES "Windows") SET(NEED_DASH_BETWEEN_PLATFORM_AND_MACHINE 0) SET(DEFAULT_PLATFORM "win") From fb38d2642011c574cc9103ae1a1f9dd77f7f027e Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 22 Oct 2016 07:34:23 +0000 Subject: [PATCH 22/44] MDEV-11104 Fix client to correctly retrieve current user name on Windows Prior to this patch name of the user was read from environment variable USER, with a fallback to 'ODBC', if the environment variable is not set. 
The name of the env.variable is incorrect (USERNAME usually contains current user's name, but not USER), which made client to always determine current user as 'ODBC'. The fix is to use GetUserName() instead. --- libmysql/libmysql.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 446f1da0b0c7d..3a08ea26b1d57 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -450,8 +450,9 @@ void read_user_name(char *name) void read_user_name(char *name) { - char *str=getenv("USER"); /* ODBC will send user variable */ - strmake(name,str ? str : "ODBC", USERNAME_LENGTH); + DWORD len= USERNAME_LENGTH; + if (!GetUserName(name, &len)) + strmov(name,"UNKNOWN_USER"); } #endif From 0c925aa9356ee9d31283510c2420d1b5f21f5c9c Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Sun, 23 Oct 2016 18:47:44 +0300 Subject: [PATCH 23/44] MDEV-11097 - Update the list of unstable tests --- mysql-test/unstable-tests | 89 ++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 53 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 2dbaeeebc0bd6..3e25115599fdc 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,78 +23,66 @@ # ############################################################################## -main.bootstrap : Modified on 2016-06-18 (MDEV-9969) main.create_delayed : MDEV-10605 - failed with timeout -main.create_or_replace : Modified on 2016-06-23 (MDEV-9728) -main.ctype_recoding : Modified on 2016-06-10 (MDEV-10181) -main.ctype_utf8 : Modified on 2016-06-21 (merge) -main.ctype_utf8mb4 : Modified on 2016-06-21 (merge) -main.events_1 : Modified on 2016-06-21 (MDEV-9524) +main.ctype_utf32 : Modified on 2016-09-27 (merge) main.func_group : Modified on 2016-08-08 (MDEV-10468) -main.func_in : Modified on 2016-06-20 (MDEV-10020) main.func_math : Modified on 2016-08-10 (merge) main.func_misc : Modified on 2016-08-10 (merge) -main.grant2 : 
Modified on 2016-07-18 (MDEV-8569) -main.help : Modified on 2016-06-21 (MDEV-9524) +main.group_min_max_innodb : Modified on 2016-08-25 (MDEV-10595) main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown main.index_intersect_innodb : MDEV-10643 - failed with timeout -main.index_merge_innodb : MDEV-7142 - sporadic wrong execution plan +main.index_merge_myisam : Modified on 2016-09-05 (include file changed) +main.index_merge_innodb : Modified on 2016-09-05 (MDEV-7142) main.information_schema_stats : Modified on 2016-07-25 (MDEV-10428) main.innodb_mysql_lock : MDEV-7861 - sporadic lock detection failure -main.insert_innodb : Modified on 2016-06-14 (merge from upstream) main.loaddata : Modified on 2016-08-10 (merge) -main.locale : Modified on 2016-06-21 (merge) main.mdev-504 : MDEV-10607 - sporadic "can't connect" main.mdev375 : MDEV-10607 - sporadic "can't connect" main.merge : MDEV-10607 - sporadic "can't connect" -main.multi_update : Modified on 2016-06-20 (MDEV-5973) main.myisam_enable_keys-10506 : New test, added on 2016-08-10 (MDEV-10506) main.mysqlcheck : Modified on 2016-08-10 (merge) main.mysqldump : MDEV-10512 - sporadic assertion failure +main.mysqlhotcopy_myisam : MDEV-10995 - test hangs on debug build main.mysqltest : MDEV-9269 - fails on Alpha main.named_pipe : Modified on 2016-08-02 (MDEV-10383) -main.openssl_1 : Modified on 2016-07-11 (MDEV-10211) -main.parser : Modified on 2016-06-21 (merge) main.pool_of_threads : MDEV-10100 - sporadic error on detecting max connections main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count -main.ps_1general : Modified on 2016-07-12 (merge) main.range : Modified on 2016-08-10 (merge) main.range_mrr_icp : Modified on 2016-08-10 (merge) main.query_cache : MDEV-10611 - sporadic mutex problem -main.shutdown : MDEV-10612 - sporadic crashes +main.shutdown : MDEV-10563 - sporadic crashes main.sp-prelocking : Modified on 2016-08-10 (merge) main.sp-security : MDEV-10607 - sporadic "can't connect" 
-main.ssl : MDEV-10211 - different ciphers on some platforms -main.ssl_ca : Modified on 2016-07-11 (MDEV-10211) -main.ssl_compress : Modified on 2016-07-11 (MDEV-10211) -main.ssl_timeout : Modified on 2016-07-11 (MDEV-10211) +main.ssl_compress : MDEV-11110 - valgrind failures main.stat_tables_par_innodb : MDEV-10515 - sporadic wrong results -main.status_user : Modified on 2016-06-20 (MDEV-8633) main.subselect_innodb : MDEV-10614 - sporadic wrong results -main.temp_table : Modified on 2016-06-18 (MDEV-8569) main.type_date : Modified on 2016-08-10 (merge) -main.type_datetime : Modified on 2016-06-16 (MDEV-9374) +main.type_uint : Modified on 2016-09-27 (merge) main.view : Modified on 2016-08-10 (merge) main.xtradb_mrr : Modified on 2016-08-04 (MDEV-9946) #---------------------------------------------------------------- -archive.archive-big : MDEV-10615 - table is marked as crashed -archive.discover : MDEV-10510 - table is marked as crashed +archive.archive-big : MDEV-10615 - table is marked as crashed +archive.discover : MDEV-10510 - table is marked as crashed +archive.mysqlhotcopy_archive : MDEV-10995 - test hangs on debug build #---------------------------------------------------------------- binlog.binlog_commit_wait : MDEV-10150 - Error: too much time elapsed -binlog.binlog_dmls_on_tmp_tables_readonly : New test, added on 2016-05-04 (upstream) binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint #---------------------------------------------------------------- connect.tbl : MDEV-9844, MDEV-10179 - sporadic crashes, valgrind warnings, wrong results -connect.jdbc : New test, added on 2016-07-15 -connect.jdbc-new : New test, added on 2016-07-14 -connect.jdbc-oracle : New test, added on 2016-07-13 -connect.jdbc-postgresql : New test, added on 2016-07-13 + +#---------------------------------------------------------------- + +engines/rr_trx.* : MDEV-10998 - tests not maintained + +#---------------------------------------------------------------- + 
+extra/binlog_tests.database : Modified on 2016-10-21 (Upstream MIPS test fixes) #---------------------------------------------------------------- @@ -105,21 +93,19 @@ federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, time #---------------------------------------------------------------- -funcs_1.processlist_priv_no_prot : Include file modified on 2016-07-12 (merge) -funcs_1.processlist_priv_ps : Include file modified on 2016-07-12 (merge) +funcs_2/charset.* : MDEV-10999 - test not maintained #---------------------------------------------------------------- innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.innodb-alter-table : MDEV-10619 - Testcase timeout innodb.innodb-alter-tempfile : Modified on 2016-08-09 (MDEV-10469) -innodb.innodb_corrupt_bit : Modified on 2016-06-21 (merge) innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan -innodb.innodb-fk-warnings : Modified on 2016-07-18 (MDEV-8569) -innodb.innodb-fkcheck : Modified on 2016-06-13 (MDEV-10083) +innodb.innodb_bug54044 : Modified on 2016-09-27 (merge) innodb.innodb_monitor : MDEV-10939 - Testcase timeout innodb.innodb-wl5522 : rdiff file modified on 2016-08-10 (merge) innodb.innodb-wl5522-debug-zip : MDEV-10427 - Warning: database page corruption +innodb.system_tables : Added on 2016-09-23 (MDEV-10775) #---------------------------------------------------------------- @@ -144,22 +130,16 @@ parts.partition_int_myisam : MDEV-10621 - Testcase timeout #---------------------------------------------------------------- -perfschema.digest_table_full : Modified on 2016-06-21 (merge) perfschema.func_file_io : MDEV-5708 - fails for s390x perfschema.func_mutex : MDEV-5708 - fails for s390x perfschema.hostcache_ipv6_ssl : MDEV-10696 - crash on shutdown -perfschema.rpl_gtid_func : Modified on 2016-06-21 (merge) -perfschema.sizing_low : Modified on 2016-04-26 (5.6.30 merge) perfschema.socket_summary_by_event_name_func : MDEV-10622 - Socket summary tables do 
not match -perfschema.start_server_low_digest : Modified on 2016-06-21 (merge) -perfschema.statement_digest : Modified on 2016-06-21 (merge) -perfschema.statement_digest_consumers : Modified on 2016-06-21 (merge) -perfschema.statement_digest_long_query : Modified on 2016-06-21 (merge) -perfschema.table_name : New test, added on 2016-04-26 (5.6.30 merge) + +perfschema_stress.* : MDEV-10996 - tests not maintained #---------------------------------------------------------------- -plugins.feedback_plugin_send : MDEV-7932 - ssl failed for url +plugins.feedback_plugin_send : MDEV-7932 - ssl failed for url, MDEV-11112 - valgrind warnings plugins.pam : Modified on 2016-08-03 (MDEV-7329) plugins.pam_cleartext : Modified on 2016-08-03 plugins.server_audit : MDEV-9562 - crashes on sol10-sparc @@ -167,11 +147,6 @@ plugins.thread_pool_server_audit : MDEV-9562 - crashes on sol10-sparc #---------------------------------------------------------------- -roles.rpl_grant_revoke_current_role-8638 : New test, added on 2016-06-20 (MDEV-8638) -roles.set_role-9614 : New test, added on 2016-05-30 (MDEV-9614) - -#---------------------------------------------------------------- - rpl.last_insert_id : MDEV-10625 - warnings in error log rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips @@ -180,11 +155,11 @@ rpl.rpl_binlog_index : MDEV-9501 - Warning: failed registering rpl.rpl_checksum_cache : MDEV-10626 - Testcase timeout rpl.rpl_circular_for_4_hosts : MDEV-10627 - Testcase timeout rpl.rpl_ddl : MDEV-10417 - Fails on Mips +rpl.rpl_drop_db : Modified on 2016-10-21 (Upstream MIPS test fixes) rpl.rpl_gtid_crash : MDEV-9501 - Warning: failed registering on master rpl.rpl_gtid_master_promote : MDEV-10628 - Timeout in sync_with_master rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown rpl.rpl_gtid_until : MDEV-10625 - warnings in error log -rpl.rpl_ignore_table : Modified on 2016-06-22 rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails 
on Mips rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x @@ -204,6 +179,8 @@ rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries rpl.sec_behind_master-5114 : MDEV-8518 - Wrong value of Seconds_Behind_Master rpl.rpl_skip_replication : MDEV-9268 - Fails with timeout in sync_slave_with_master on Alpha +rpl/extra/rpl_tests.* : MDEV-10994 - tests not maintained + #---------------------------------------------------------------- spider.* : MDEV-9329 - tests are too memory-consuming @@ -217,6 +194,10 @@ spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x #---------------------------------------------------------------- +sphinx.* : MDEV-10747 - tests are not run in buildbot, they can't be stable + +#---------------------------------------------------------------- + stress.ddl_innodb : MDEV-10635 - Testcase timeout #---------------------------------------------------------------- @@ -232,11 +213,14 @@ tokudb.background_job_manager : MDEV-10327 - Assertion failure on server tokudb.cluster_filter_unpack_varchar : MDEV-10636 - Wrong execution plan tokudb.* : MDEV-9891 - massive crashes on shutdown tokudb_alter_table.* : MDEV-9891 - massive crashes on shutdown +tokudb_backup.* : MDEV-11001 - tests don't work tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output tokudb_bugs.* : MDEV-9891 - massive crashes on shutdown tokudb_parts.* : MDEV-9891 - massive crashes on shutdown -rpl-tokudb.* : MDEV-9891 - massive crashes on shutdown, also modified on 2016-06-10 (Merge) +tokudb_rpl_suites.* : MDEV-11001 - tests don't work +tokudb_sys_vars.* : MDEV-11001 - tests don't work +rpl-tokudb.* : MDEV-9891 - massive crashes on shutdown tokudb/tokudb_add_index.* : MDEV-9891 - massive crashes on shutdown tokudb/tokudb_backup.* : MDEV-9891 - massive crashes on shutdown tokudb/tokudb_mariadb.* : MDEV-9891 - massive crashes on shutdown @@ 
-250,7 +234,6 @@ unit.ma_test_loghandler : MDEV-10638 - record read not ok #---------------------------------------------------------------- -vcol.charsets : Added on 2016-06-23 vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout From 3321f1adc74b54e7534000c06eeca166730ccc4a Mon Sep 17 00:00:00 2001 From: Don Lewis Date: Tue, 21 Jun 2016 13:35:59 +1000 Subject: [PATCH 24/44] MDEV-5944: Compile fix for OQGRAPH with LLVM Clang/LLVM has more strict schemantics than gcc. This patch quantifies the namesspace such that it will compile using clang. --- storage/oqgraph/graphcore.cc | 2 +- storage/oqgraph/oqgraph_shim.h | 48 +++++++++++++++++++--------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/storage/oqgraph/graphcore.cc b/storage/oqgraph/graphcore.cc index 4346b94805c7e..7c8ca53c0969c 100644 --- a/storage/oqgraph/graphcore.cc +++ b/storage/oqgraph/graphcore.cc @@ -485,7 +485,7 @@ namespace open_query optional oqgraph_share::find_vertex(VertexID id) const { - return ::boost::find_vertex(id, g); + return oqgraph3::find_vertex(id, g); } #if 0 diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h index af240b88ebdcb..004d7f0f7c509 100644 --- a/storage/oqgraph/oqgraph_shim.h +++ b/storage/oqgraph/oqgraph_shim.h @@ -274,6 +274,33 @@ namespace boost }; #endif + template<> + struct property_map + { + typedef void type; + typedef oqgraph3::edge_weight_property_map const_type; + }; + + template<> + struct property_map + { + typedef void type; + typedef oqgraph3::vertex_index_property_map const_type; + }; + + template<> + struct property_map + { + typedef void type; + typedef oqgraph3::edge_index_property_map const_type; + }; + +} + +namespace oqgraph3 +{ + using namespace boost; + inline graph_traits::vertex_descriptor source( const graph_traits::edge_descriptor& e, @@ -401,27 +428,6 @@ namespace boost return count; } - template<> - struct property_map - { - typedef void type; - 
typedef oqgraph3::edge_weight_property_map const_type; - }; - - template<> - struct property_map - { - typedef void type; - typedef oqgraph3::vertex_index_property_map const_type; - }; - - template<> - struct property_map - { - typedef void type; - typedef oqgraph3::edge_index_property_map const_type; - }; - inline property_map< oqgraph3::graph, edge_weight_t>::const_type::reference From ba11dd69fee7b82edf4e6afbb13e3fa94cd885ca Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 25 Oct 2016 12:21:53 +0000 Subject: [PATCH 25/44] MDEV-11127 : Fix innochecksum to work with large files on Windows. - don't use stat() for file size, it doesn not handle large size use GetFileSizeEx() instead - don't use lseek(), it can't handle large files, use _lseeki64() instead. - Also, switch off OS file buffering for innochecksum on Windows, to avoid thrashing file cache. --- extra/innochecksum.cc | 55 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc index 6018a4884ea5b..97d47b4563af7 100644 --- a/extra/innochecksum.cc +++ b/extra/innochecksum.cc @@ -243,10 +243,9 @@ int main(int argc, char **argv) time_t lastt; /* last time */ ulint oldcsum, oldcsumfield, csum, csumfield, crc32, logseq, logseqfield; /* ulints for checksum storage */ - struct stat st; /* for stat, if you couldn't guess */ unsigned long long int size; /* size of file (has to be 64 bits) */ ulint pages; /* number of pages in file */ - off_t offset= 0; + unsigned long long offset= 0; int fd; printf("InnoDB offline file checksum utility.\n"); @@ -269,6 +268,47 @@ int main(int argc, char **argv) goto error; } +#ifdef _WIN32 + /* Switch off OS file buffering for the file. 
*/ + + HANDLE h = CreateFile(filename, GENERIC_READ, + FILE_SHARE_READ|FILE_SHARE_WRITE, 0, + OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, 0); + + if (!h) + { + fprintf(stderr, "Error; cant open file\n"); + goto error; + } + + if (!GetFileSizeEx(h, (LARGE_INTEGER *)&size)) + { + fprintf(stderr, "Error; GetFileSize() failed\n"); + goto error; + } + + fd = _open_osfhandle ((intptr_t) h, _O_RDONLY); + if (fd < 0) + { + fprintf(stderr, "Error; _open_osfhandle() failed\n"); + goto error; + } + + f = _fdopen(fd, "rb"); + if (!f) + { + fprintf(stderr, "Error; fdopen() failed\n"); + goto error; + } + + /* + Disable stdio buffering (FILE_FLAG_NO_BUFFERING requires properly IO buffers + which stdio does not guarantee. + */ + setvbuf(f, NULL, _IONBF, 0); + +#else + struct stat st; /* stat the file to get size and page count */ if (stat(filename, &st)) { @@ -279,6 +319,8 @@ int main(int argc, char **argv) /* Open the file for reading */ f= fopen(filename, "rb"); +#endif + if (f == NULL) { fprintf(stderr, "Error; %s cannot be opened", filename); @@ -323,7 +365,7 @@ int main(int argc, char **argv) } else if (verbose) { - printf("file %s = %llu bytes (%lu pages)...\n", filename, size, pages); + printf("file %s = %llu bytes (%lu pages)...\n", filename, size, (ulong)pages); if (do_one_page) printf("InnoChecksum; checking page %lu\n", do_page); else @@ -348,9 +390,12 @@ int main(int argc, char **argv) goto error; } - offset= (off_t)start_page * (off_t)physical_page_size; - + offset= (ulonglong)start_page * (ulonglong)physical_page_size; +#ifdef _WIN32 + if (_lseeki64(fd, offset, SEEK_SET) != offset) +#else if (lseek(fd, offset, SEEK_SET) != offset) +#endif { perror("Error; Unable to seek to necessary offset"); goto error; From 39dceaae607e2c9f53146d5b23f8dee330643cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sun, 9 Oct 2016 12:09:44 +0200 Subject: [PATCH 26/44] MDEV-10983: TokuDB does not compile on OS X 10.12 Make use of a different function to get the 
current tid. Additionally, librt doesn't exist on OS X. Use System library instead. --- .../PerconaFT/cmake_modules/TokuFeatureDetection.cmake | 4 +++- storage/tokudb/PerconaFT/portability/portability.cc | 9 ++++++++- storage/tokudb/PerconaFT/portability/tests/test-xid.cc | 9 ++++++++- storage/tokudb/PerconaFT/portability/toku_config.h.in | 1 + 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake index 4c5004cd6a538..883f35041e2fd 100644 --- a/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake +++ b/storage/tokudb/PerconaFT/cmake_modules/TokuFeatureDetection.cmake @@ -97,7 +97,7 @@ if (NOT HAVE_BACKTRACE_WITHOUT_EXECINFO) endif () endif () -if(HAVE_CLOCK_REALTIME) +if(HAVE_CLOCK_REALTIME AND (NOT APPLE)) list(APPEND EXTRA_SYSTEM_LIBS rt) else() list(APPEND EXTRA_SYSTEM_LIBS System) @@ -109,6 +109,8 @@ check_function_exists(pthread_rwlockattr_setkind_np HAVE_PTHREAD_RWLOCKATTR_SETK ## check for the right way to yield using pthreads check_function_exists(pthread_yield HAVE_PTHREAD_YIELD) check_function_exists(pthread_yield_np HAVE_PTHREAD_YIELD_NP) +## check if we have pthread_threadid_np() (i.e. osx) +check_function_exists(pthread_threadid_np HAVE_PTHREAD_THREADID_NP) ## check if we have pthread_getthreadid_np() (i.e. freebsd) check_function_exists(pthread_getthreadid_np HAVE_PTHREAD_GETTHREADID_NP) check_function_exists(sched_getcpu HAVE_SCHED_GETCPU) diff --git a/storage/tokudb/PerconaFT/portability/portability.cc b/storage/tokudb/PerconaFT/portability/portability.cc index ba9f8d48ed5dc..19f445a85d7f4 100644 --- a/storage/tokudb/PerconaFT/portability/portability.cc +++ b/storage/tokudb/PerconaFT/portability/portability.cc @@ -63,6 +63,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#if defined(HAVE_SYS_SYSCTL_H) # include #endif +#if defined(HAVE_PTHREAD_H) +# include +#endif #if defined(HAVE_PTHREAD_NP_H) # include #endif @@ -102,7 +105,11 @@ toku_os_getpid(void) { int toku_os_gettid(void) { -#if defined(__NR_gettid) +#if defined(HAVE_PTHREAD_THREADID_NP) + uint64_t result; + pthread_threadid_np(NULL, &result); + return (int) result; // Used for instrumentation so overflow is ok here. +#elif defined(__NR_gettid) return syscall(__NR_gettid); #elif defined(SYS_gettid) return syscall(SYS_gettid); diff --git a/storage/tokudb/PerconaFT/portability/tests/test-xid.cc b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc index 9ee68906bb37a..71736f898ef87 100644 --- a/storage/tokudb/PerconaFT/portability/tests/test-xid.cc +++ b/storage/tokudb/PerconaFT/portability/tests/test-xid.cc @@ -51,11 +51,18 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #if defined(HAVE_PTHREAD_NP_H) # include #endif +#if defined(HAVE_PTHREAD_H) +# include +#endif // since we implement the same thing here as in toku_os_gettid, this test // is pretty pointless static int gettid(void) { -#if defined(__NR_gettid) +#if defined(HAVE_PTHREAD_THREADID_NP) + uint64_t result; + pthread_threadid_np(NULL, &result); + return (int) result; +#elif defined(__NR_gettid) return syscall(__NR_gettid); #elif defined(SYS_gettid) return syscall(SYS_gettid); diff --git a/storage/tokudb/PerconaFT/portability/toku_config.h.in b/storage/tokudb/PerconaFT/portability/toku_config.h.in index 1a34bf1ef45a7..18f6779796fe5 100644 --- a/storage/tokudb/PerconaFT/portability/toku_config.h.in +++ b/storage/tokudb/PerconaFT/portability/toku_config.h.in @@ -87,6 +87,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#cmakedefine HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP 1 #cmakedefine HAVE_PTHREAD_YIELD 1 #cmakedefine HAVE_PTHREAD_YIELD_NP 1 +#cmakedefine HAVE_PTHREAD_THREADID_NP 1 #cmakedefine HAVE_PTHREAD_GETTHREADID_NP 1 #cmakedefine PTHREAD_YIELD_RETURNS_INT 1 From 1daf746e31e38a3ec1cdcb9427153b65f744dcda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 25 Oct 2016 16:34:22 +0300 Subject: [PATCH 27/44] Add tokuftdump man page The man page was already present in the debian release of MariaDB 10.0. --- man/CMakeLists.txt | 2 +- tokuftdump.1 | 237 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 238 insertions(+), 1 deletion(-) create mode 100644 tokuftdump.1 diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index c4383b31a1787..c6163fee5372d 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -22,7 +22,7 @@ SET(MAN1_SERVER innochecksum.1 my_print_defaults.1 myisam_ftdump.1 myisamchk.1 mysql_tzinfo_to_sql.1 mysql_upgrade.1 mysql_zap.1 mysqld_multi.1 mysqld_safe.1 mysqldumpslow.1 mysqlhotcopy.1 mysqltest.1 perror.1 replace.1 resolve_stack_dump.1 - resolveip.1 mysqlbug.1) + resolveip.1 mysqlbug.1 tokuftdump.1) SET(MAN8_SERVER mysqld.8) SET(MAN1_CLIENT msql2mysql.1 mysql.1 mysql_find_rows.1 mysql_waitpid.1 mysqlaccess.1 mysqladmin.1 mysqlbinlog.1 mysqlcheck.1 diff --git a/tokuftdump.1 b/tokuftdump.1 new file mode 100644 index 0000000000000..3d9faae30cacc --- /dev/null +++ b/tokuftdump.1 @@ -0,0 +1,237 @@ +'\" t +.\" +.TH "\FBTOKUFTDUMP\FR" "1" "04/07/2016" "MariaDB 10\&.0" "MariaDB Database System" +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" 
----------------------------------------------------------------- +.\" tokuftdump +.\" upgrading MySQL +.SH "NAME" +tokuftdump \- look into the fractal tree file +.SH "SYNOPSIS" +.HP \w'\fBtokuftdump\ [\fR\fB\fIoptions\fR\fR\fB]\fR\ 'u +\fBtokuftdump [\fR\fB\fIoptions\fR\fR\fB]\fR +.SH "DESCRIPTION" +.PP +\fBtokuftdump\fR +Investigates and diagnoses the fractal tree\&. +.PP +\fBtokuftdump\fR +supports the following options for processing option files\&. +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: interactive option +.\" interactive option: tokuftdump +\fB\-\-interactive\fR +.sp +Interactive\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: support option +.\" support option: tokuftdump +\fB\-\-support \fI/path/to/fractal-tree/file\fR +.sp +An interactive way to see what messages and/or switch between FTs\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: json option +.\" json option: tokuftdump +\fB\-\-json \fI/path/to/fractal-tree/file [output_json_file]\fR +.sp +If the output json file is left empty, FT\&.json will be created automatically\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: nodata option +.\" nodata option: tokuftdump +\fB\-\-nodata\fR +.sp +Nodata\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: dumpdata option +.\" dumpdata option: tokuftdump +\fB\-\-dumpdata = \fR\fB\fI0|1\fR\fR +.sp +Dumpdata\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: header option +.\" header option: tokuftdump +\fB\-\-header\fR +.sp +Header\&. 
+.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: rootnode option +.\" rootnode option: tokuftdump +\fB\-\-rootnode\fR +.sp +Rootnode\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: node option +.\" node option: tokuftdump +\fB\-\-node \fIN\fR +.sp +Node\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: fragmentation option +.\" fragmentation option: tokuftdump +\fB\-\-fragmentation\fR +.sp +Fragmentation\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: garbage option +.\" garbage option: tokuftdump +\fB\-\-garbage\fR +.sp +Garbage\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: tsv option +.\" tsv option: tokuftdump +\fB\-\-tsv\fR +.sp +TSV\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: translation-table option +.\" translation-table option: tokuftdump +\fB\-\-translation\-table\fR +.sp +Translation table\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +.\" tokuftdump: summary option +.\" summary option: tokuftdump +\fB\-\-summary\fR +.sp +Provide summary info\&. +.RE +.SH "COPYRIGHT" +.br +.PP +Copyright 2016 MariaDB Foundation +.PP +This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. +.PP +This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+.PP +You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. +.sp +.SH "SEE ALSO" +For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/ +.SH AUTHOR +MariaDB Foundation (http://www.mariadb.org/). From ed3998ae7cc286860670bc9a285aeb99c5edcced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 25 Oct 2016 15:46:10 +0200 Subject: [PATCH 28/44] Revert "Add tokuftdump man page" This reverts commit 1daf746e31e38a3ec1cdcb9427153b65f744dcda. Removed temporarily to make sure there are no legal problems. --- man/CMakeLists.txt | 2 +- tokuftdump.1 | 237 --------------------------------------------- 2 files changed, 1 insertion(+), 238 deletions(-) delete mode 100644 tokuftdump.1 diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index c6163fee5372d..c4383b31a1787 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -22,7 +22,7 @@ SET(MAN1_SERVER innochecksum.1 my_print_defaults.1 myisam_ftdump.1 myisamchk.1 mysql_tzinfo_to_sql.1 mysql_upgrade.1 mysql_zap.1 mysqld_multi.1 mysqld_safe.1 mysqldumpslow.1 mysqlhotcopy.1 mysqltest.1 perror.1 replace.1 resolve_stack_dump.1 - resolveip.1 mysqlbug.1 tokuftdump.1) + resolveip.1 mysqlbug.1) SET(MAN8_SERVER mysqld.8) SET(MAN1_CLIENT msql2mysql.1 mysql.1 mysql_find_rows.1 mysql_waitpid.1 mysqlaccess.1 mysqladmin.1 mysqlbinlog.1 mysqlcheck.1 diff --git a/tokuftdump.1 b/tokuftdump.1 deleted file mode 100644 index 3d9faae30cacc..0000000000000 --- a/tokuftdump.1 +++ /dev/null @@ -1,237 +0,0 @@ -'\" t -.\" -.TH "\FBTOKUFTDUMP\FR" "1" "04/07/2016" "MariaDB 10\&.0" "MariaDB Database System" -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable 
hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.\" tokuftdump -.\" upgrading MySQL -.SH "NAME" -tokuftdump \- look into the fractal tree file -.SH "SYNOPSIS" -.HP \w'\fBtokuftdump\ [\fR\fB\fIoptions\fR\fR\fB]\fR\ 'u -\fBtokuftdump [\fR\fB\fIoptions\fR\fR\fB]\fR -.SH "DESCRIPTION" -.PP -\fBtokuftdump\fR -Investigates and diagnoses the fractal tree\&. -.PP -\fBtokuftdump\fR -supports the following options for processing option files\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: interactive option -.\" interactive option: tokuftdump -\fB\-\-interactive\fR -.sp -Interactive\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: support option -.\" support option: tokuftdump -\fB\-\-support \fI/path/to/fractal-tree/file\fR -.sp -An interactive way to see what messages and/or switch between FTs\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: json option -.\" json option: tokuftdump -\fB\-\-json \fI/path/to/fractal-tree/file [output_json_file]\fR -.sp -If the output json file is left empty, FT\&.json will be created automatically\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: nodata option -.\" nodata option: tokuftdump -\fB\-\-nodata\fR -.sp -Nodata\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: dumpdata option -.\" dumpdata option: tokuftdump -\fB\-\-dumpdata = \fR\fB\fI0|1\fR\fR -.sp -Dumpdata\&. 
-.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: header option -.\" header option: tokuftdump -\fB\-\-header\fR -.sp -Header\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: rootnode option -.\" rootnode option: tokuftdump -\fB\-\-rootnode\fR -.sp -Rootnode\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: node option -.\" node option: tokuftdump -\fB\-\-node \fIN\fR -.sp -Node\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: fragmentation option -.\" fragmentation option: tokuftdump -\fB\-\-fragmentation\fR -.sp -Fragmentation\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: garbage option -.\" garbage option: tokuftdump -\fB\-\-garbage\fR -.sp -Garbage\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: tsv option -.\" tsv option: tokuftdump -\fB\-\-tsv\fR -.sp -TSV\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: translation-table option -.\" translation-table option: tokuftdump -\fB\-\-translation\-table\fR -.sp -Translation table\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" tokuftdump: summary option -.\" summary option: tokuftdump -\fB\-\-summary\fR -.sp -Provide summary info\&. -.RE -.SH "COPYRIGHT" -.br -.PP -Copyright 2016 MariaDB Foundation -.PP -This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. 
-.PP -This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -.PP -You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. -.sp -.SH "SEE ALSO" -For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/ -.SH AUTHOR -MariaDB Foundation (http://www.mariadb.org/). From d7dc03a26797f07625e8c44d2d1ac7f76e860bad Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2016 17:01:37 +0200 Subject: [PATCH 29/44] 5.6.33-79.0 --- storage/xtradb/buf/buf0buf.cc | 4 +- storage/xtradb/buf/buf0dblwr.cc | 2 +- storage/xtradb/buf/buf0flu.cc | 5 + storage/xtradb/dict/dict0boot.cc | 4 + storage/xtradb/dict/dict0crea.cc | 583 ++++++++++++++++++++++ storage/xtradb/dict/dict0dict.cc | 158 ++++++ storage/xtradb/dict/dict0load.cc | 159 +++++- storage/xtradb/fil/fil0fil.cc | 2 + storage/xtradb/fts/fts0fts.cc | 31 ++ storage/xtradb/handler/ha_innodb.cc | 392 ++++++++++++++- storage/xtradb/handler/ha_innodb.h | 37 ++ storage/xtradb/handler/handler0alter.cc | 66 ++- storage/xtradb/handler/i_s.cc | 2 + storage/xtradb/handler/xtradb_i_s.cc | 350 +++++++++++++ storage/xtradb/handler/xtradb_i_s.h | 2 + storage/xtradb/include/data0type.h | 14 + storage/xtradb/include/data0type.ic | 16 + storage/xtradb/include/dict0boot.h | 32 ++ storage/xtradb/include/dict0crea.h | 91 ++++ storage/xtradb/include/dict0dict.h | 46 ++ storage/xtradb/include/dict0load.h | 29 ++ storage/xtradb/include/fts0fts.h | 10 + storage/xtradb/include/os0thread.h | 15 +- storage/xtradb/include/rem0types.h | 3 + storage/xtradb/include/row0mysql.h | 85 +++- storage/xtradb/include/srv0srv.h | 5 + 
storage/xtradb/include/univ.i | 2 +- storage/xtradb/log/log0log.cc | 20 +- storage/xtradb/log/log0online.cc | 33 +- storage/xtradb/mach/mach0data.cc | 13 +- storage/xtradb/os/os0thread.cc | 24 +- storage/xtradb/rem/rem0rec.cc | 23 +- storage/xtradb/row/row0ftsort.cc | 2 +- storage/xtradb/row/row0log.cc | 14 +- storage/xtradb/row/row0merge.cc | 18 +- storage/xtradb/row/row0mysql.cc | 634 +++++++++++++++++++++++- storage/xtradb/row/row0sel.cc | 45 +- storage/xtradb/srv/srv0start.cc | 6 + 38 files changed, 2892 insertions(+), 85 deletions(-) diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 321a1d9f673a3..978d94f07ec76 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -4492,7 +4492,9 @@ buf_page_io_complete( recv_recover_page(TRUE, (buf_block_t*) bpage); } - if (uncompressed && !recv_no_ibuf_operations) { + if (uncompressed && !recv_no_ibuf_operations + && fil_page_get_type(frame) == FIL_PAGE_INDEX + && page_is_leaf(frame)) { buf_block_t* block; ibool update_ibuf_bitmap; diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc index f4d1c637e3e6f..3c12d6da73f3f 100644 --- a/storage/xtradb/buf/buf0dblwr.cc +++ b/storage/xtradb/buf/buf0dblwr.cc @@ -521,7 +521,7 @@ buf_dblwr_process() if (buf_page_is_corrupted(true, read_buf, zip_size)) { fprintf(stderr, - "InnoDB: Warning: database page" + "InnoDB: Database page" " corruption or a failed\n" "InnoDB: file read of" " space %lu page %lu.\n" diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 14a5fbde7e820..5dd2efcf0c36b 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -2568,6 +2568,11 @@ page_cleaner_sleep_if_needed( ulint next_loop_time) /*!< in: time when next loop iteration should start */ { + /* No sleep if we are cleaning the buffer pool during the shutdown + with everything else finished */ + if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE) + return; + ulint cur_time = 
ut_time_ms(); if (next_loop_time > cur_time) { diff --git a/storage/xtradb/dict/dict0boot.cc b/storage/xtradb/dict/dict0boot.cc index 94a3af2852b39..c0bb0298bea72 100644 --- a/storage/xtradb/dict/dict0boot.cc +++ b/storage/xtradb/dict/dict0boot.cc @@ -272,6 +272,10 @@ dict_boot(void) ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_FOR_NAME == 2); ut_ad(DICT_NUM_COLS__SYS_FOREIGN_COLS == 4); ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_COLS == 6); + ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT == 3); + ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT == 5); + ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT_COLS == 3); + ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS == 5); mtr_start(&mtr); diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc index a4fcf57c02844..9460ce5165977 100644 --- a/storage/xtradb/dict/dict0crea.cc +++ b/storage/xtradb/dict/dict0crea.cc @@ -38,6 +38,7 @@ Created 1/8/1996 Heikki Tuuri #include "que0que.h" #include "row0ins.h" #include "row0mysql.h" +#include "row0sel.h" #include "pars0pars.h" #include "trx0roll.h" #include "usr0sess.h" @@ -1790,6 +1791,135 @@ dict_create_or_check_sys_tablespace(void) return(err); } +/** Creates the zip_dict system table inside InnoDB +at server bootstrap or server start if it is not found or is +not of the right form. +@return DB_SUCCESS or error code */ +UNIV_INTERN +dberr_t +dict_create_or_check_sys_zip_dict(void) +{ + trx_t* trx; + my_bool srv_file_per_table_backup; + dberr_t err; + dberr_t sys_zip_dict_err; + dberr_t sys_zip_dict_cols_err; + + ut_a(srv_get_active_thread_type() == SRV_NONE); + + /* Note: The master thread has not been started at this point. 
*/ + + sys_zip_dict_err = dict_check_if_system_table_exists( + "SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2); + sys_zip_dict_cols_err = dict_check_if_system_table_exists( + "SYS_ZIP_DICT_COLS", DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1, + 1); + + if (sys_zip_dict_err == DB_SUCCESS && + sys_zip_dict_cols_err == DB_SUCCESS) + return (DB_SUCCESS); + + trx = trx_allocate_for_mysql(); + + trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); + + trx->op_info = "creating zip_dict and zip_dict_cols sys tables"; + + row_mysql_lock_data_dictionary(trx); + + /* Check which incomplete table definition to drop. */ + + if (sys_zip_dict_err == DB_CORRUPTION) { + ib_logf(IB_LOG_LEVEL_WARN, + "Dropping incompletely created " + "SYS_ZIP_DICT table."); + row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE); + } + if (sys_zip_dict_cols_err == DB_CORRUPTION) { + ib_logf(IB_LOG_LEVEL_WARN, + "Dropping incompletely created " + "SYS_ZIP_DICT_COLS table."); + row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE); + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Creating zip_dict and zip_dict_cols system tables."); + + /* We always want SYSTEM tables to be created inside the system + tablespace. 
*/ + srv_file_per_table_backup = srv_file_per_table; + srv_file_per_table = 0; + + err = que_eval_sql( + NULL, + "PROCEDURE CREATE_SYS_ZIP_DICT_PROC () IS\n" + "BEGIN\n" + "CREATE TABLE SYS_ZIP_DICT(\n" + " ID INT UNSIGNED NOT NULL,\n" + " NAME CHAR(" + STRINGIFY_ARG(ZIP_DICT_MAX_NAME_LENGTH) + ") NOT NULL,\n" + " DATA BLOB NOT NULL\n" + ");\n" + "CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_ID" + " ON SYS_ZIP_DICT (ID);\n" + "CREATE UNIQUE INDEX SYS_ZIP_DICT_NAME" + " ON SYS_ZIP_DICT (NAME);\n" + "CREATE TABLE SYS_ZIP_DICT_COLS(\n" + " TABLE_ID INT UNSIGNED NOT NULL,\n" + " COLUMN_POS INT UNSIGNED NOT NULL,\n" + " DICT_ID INT UNSIGNED NOT NULL\n" + ");\n" + "CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_COLS_COMPOSITE" + " ON SYS_ZIP_DICT_COLS (TABLE_ID, COLUMN_POS);\n" + "END;\n", + FALSE, trx); + + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Creation of SYS_ZIP_DICT and SYS_ZIP_DICT_COLS" + "has failed with error %lu. Tablespace is full. " + "Dropping incompletely created tables.", + (ulong) err); + + ut_a(err == DB_OUT_OF_FILE_SPACE + || err == DB_TOO_MANY_CONCURRENT_TRXS); + + row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE); + row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE); + + if (err == DB_OUT_OF_FILE_SPACE) { + err = DB_MUST_GET_MORE_FILE_SPACE; + } + } + + trx_commit_for_mysql(trx); + + row_mysql_unlock_data_dictionary(trx); + + trx_free_for_mysql(trx); + + srv_file_per_table = srv_file_per_table_backup; + + if (err == DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_INFO, + "zip_dict and zip_dict_cols system tables created."); + } + + /* Note: The master thread has not been started at this point. */ + /* Confirm and move to the non-LRU part of the table LRU list. 
*/ + + sys_zip_dict_err = dict_check_if_system_table_exists( + "SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2); + ut_a(sys_zip_dict_err == DB_SUCCESS); + sys_zip_dict_cols_err = dict_check_if_system_table_exists( + "SYS_ZIP_DICT_COLS", + DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1, 1); + ut_a(sys_zip_dict_cols_err == DB_SUCCESS); + + return(err); +} + /********************************************************************//** Add a single tablespace definition to the data dictionary tables in the database. @@ -1843,3 +1973,456 @@ dict_create_add_tablespace_to_dictionary( return(error); } + +/** Add a single compression dictionary definition to the SYS_ZIP_DICT +InnoDB system table. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_add_zip_dict( + const char* name, /*!< in: dict name */ + ulint name_len, /*!< in: dict name length */ + const char* data, /*!< in: dict data */ + ulint data_len, /*!< in: dict data length */ + trx_t* trx) /*!< in/out: transaction */ +{ + ut_ad(name); + ut_ad(data); + + pars_info_t* info = pars_info_create(); + + pars_info_add_literal(info, "name", name, name_len, + DATA_VARCHAR, DATA_ENGLISH); + pars_info_add_literal(info, "data", data, data_len, + DATA_BLOB, DATA_BINARY_TYPE | DATA_NOT_NULL); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + " max_id INT;\n" + "DECLARE CURSOR cur IS\n" + " SELECT ID FROM SYS_ZIP_DICT\n" + " ORDER BY ID DESC;\n" + "BEGIN\n" + " max_id := 0;\n" + " OPEN cur;\n" + " FETCH cur INTO max_id;\n" + " IF (cur % NOTFOUND) THEN\n" + " max_id := 0;\n" + " END IF;\n" + " CLOSE cur;\n" + " INSERT INTO SYS_ZIP_DICT VALUES" + " (max_id + 1, :name, :data);\n" + "END;\n", + FALSE, trx); + + return error; +} + +/** Fetch callback, just stores extracted zip_dict id in the external +variable. 
+@return TRUE if all OK */ +static +ibool +dict_create_extract_int_aux( + void* row, /*!< in: sel_node_t* */ + void* user_arg) /*!< in: int32 id */ +{ + sel_node_t* node = static_cast(row); + dfield_t* dfield = que_node_get_val(node->select_list); + dtype_t* type = dfield_get_type(dfield); + ulint len = dfield_get_len(dfield); + + ut_a(dtype_get_mtype(type) == DATA_INT); + ut_a(len == sizeof(ib_uint32_t)); + + memcpy(user_arg, dfield_get_data(dfield), sizeof(ib_uint32_t)); + + return(TRUE); +} + +/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS +InnoDB system table. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_add_zip_dict_reference( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint dict_id, /*!< in: dict id */ + trx_t* trx) /*!< in/out: transaction */ +{ + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "table_id", table_id); + pars_info_add_int4_literal(info, "column_pos", column_pos); + pars_info_add_int4_literal(info, "dict_id", dict_id); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "BEGIN\n" + " INSERT INTO SYS_ZIP_DICT_COLS VALUES" + " (:table_id, :column_pos, :dict_id);\n" + "END;\n", + FALSE, trx); + return error; +} + +/** Get a single compression dictionary id for the given +(table id, column pos) pair. 
+@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_id_by_reference( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint* dict_id, /*!< out: dict id */ + trx_t* trx) /*!< in/out: transaction */ +{ + ut_ad(dict_id); + + pars_info_t* info = pars_info_create(); + + ib_uint32_t dict_id_buf; + mach_write_to_4(reinterpret_cast(&dict_id_buf ), + ULINT32_UNDEFINED); + + pars_info_add_int4_literal(info, "table_id", table_id); + pars_info_add_int4_literal(info, "column_pos", column_pos); + pars_info_bind_function( + info, "my_func", dict_create_extract_int_aux, &dict_id_buf); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "DECLARE FUNCTION my_func;\n" + "DECLARE CURSOR cur IS\n" + " SELECT DICT_ID FROM SYS_ZIP_DICT_COLS\n" + " WHERE TABLE_ID = :table_id AND\n" + " COLUMN_POS = :column_pos;\n" + "BEGIN\n" + " OPEN cur;\n" + " FETCH cur INTO my_func();\n" + " CLOSE cur;\n" + "END;\n", + FALSE, trx); + if (error == DB_SUCCESS) { + ib_uint32_t local_dict_id = mach_read_from_4( + reinterpret_cast(&dict_id_buf)); + if (local_dict_id == ULINT32_UNDEFINED) + error = DB_RECORD_NOT_FOUND; + else + *dict_id = local_dict_id; + } + return error; +} + +/** Get compression dictionary id for the given name. 
+@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_id_by_name( + const char* dict_name, /*!< in: dict name */ + ulint dict_name_len, /*!< in: dict name length */ + ulint* dict_id, /*!< out: dict id */ + trx_t* trx) /*!< in/out: transaction */ +{ + ut_ad(dict_name); + ut_ad(dict_name_len); + ut_ad(dict_id); + + pars_info_t* info = pars_info_create(); + + pars_info_add_literal(info, "dict_name", dict_name, dict_name_len, + DATA_VARCHAR, DATA_ENGLISH); + + ib_uint32_t dict_id_buf; + mach_write_to_4(reinterpret_cast(&dict_id_buf), + ULINT32_UNDEFINED); + pars_info_bind_function( + info, "my_func", dict_create_extract_int_aux, &dict_id_buf); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "DECLARE FUNCTION my_func;\n" + "DECLARE CURSOR cur IS\n" + " SELECT ID FROM SYS_ZIP_DICT\n" + " WHERE NAME = :dict_name;\n" + "BEGIN\n" + " OPEN cur;\n" + " FETCH cur INTO my_func();\n" + " CLOSE cur;\n" + "END;\n", + FALSE, trx); + if (error == DB_SUCCESS) { + ib_uint32_t local_dict_id = mach_read_from_4( + reinterpret_cast(&dict_id_buf)); + if (local_dict_id == ULINT32_UNDEFINED) + error = DB_RECORD_NOT_FOUND; + else + *dict_id = local_dict_id; + } + return error; +} + +/** Auxiliary enum used to indicate zip dict data extraction result code */ +enum zip_dict_info_aux_code { + zip_dict_info_success, /*!< success */ + zip_dict_info_not_found, /*!< zip dict record not found */ + zip_dict_info_oom, /*!< out of memory */ + zip_dict_info_corrupted_name, /*!< corrupted zip dict name */ + zip_dict_info_corrupted_data /*!< corrupted zip dict data */ +}; + +/** Auxiliary struct used to return zip dict info aling with result code */ +struct zip_dict_info_aux { + LEX_STRING name; /*!< zip dict name */ + LEX_STRING data; /*!< zip dict data */ + int code; /*!< result code (0 - success) */ +}; + +/** Fetch callback, just stores extracted zip_dict data in the external +variable. 
+@return always returns TRUE */ +static +ibool +dict_create_get_zip_dict_info_by_id_aux( + void* row, /*!< in: sel_node_t* */ + void* user_arg) /*!< in: pointer to zip_dict_info_aux* */ +{ + sel_node_t* node = static_cast(row); + zip_dict_info_aux* result = + static_cast(user_arg); + + result->code = zip_dict_info_success; + result->name.str = 0; + result->name.length = 0; + result->data.str = 0; + result->data.length = 0; + + /* NAME field */ + que_node_t* exp = node->select_list; + ut_a(exp != 0); + + dfield_t* dfield = que_node_get_val(exp); + dtype_t* type = dfield_get_type(dfield); + ut_a(dtype_get_mtype(type) == DATA_VARCHAR); + + ulint len = dfield_get_len(dfield); + void* data = dfield_get_data(dfield); + + + if (len == UNIV_SQL_NULL) { + result->code = zip_dict_info_corrupted_name; + } + else { + result->name.str = + static_cast(my_malloc(len + 1, MYF(0))); + if (result->name.str == 0) { + result->code = zip_dict_info_oom; + } + else { + memcpy(result->name.str, data, len); + result->name.str[len] = '\0'; + result->name.length = len; + } + } + + /* DATA field */ + exp = que_node_get_next(exp); + ut_a(exp != 0); + + dfield = que_node_get_val(exp); + type = dfield_get_type(dfield); + ut_a(dtype_get_mtype(type) == DATA_BLOB); + + len = dfield_get_len(dfield); + data = dfield_get_data(dfield); + + if (len == UNIV_SQL_NULL) { + result->code = zip_dict_info_corrupted_data; + } + else { + result->data.str = + static_cast(my_malloc( + len == 0 ? 
1 : len, MYF(0))); + if (result->data.str == 0) { + result->code = zip_dict_info_oom; + } + else { + memcpy(result->data.str, data, len); + result->data.length = len; + } + } + + ut_ad(que_node_get_next(exp) == 0); + + if (result->code != zip_dict_info_success) { + if (result->name.str == 0) { + mem_free(result->name.str); + result->name.str = 0; + result->name.length = 0; + } + if (result->data.str == 0) { + mem_free(result->data.str); + result->data.str = 0; + result->data.length = 0; + } + } + + return TRUE; +} + +/** Get compression dictionary info (name and data) for the given id. +Allocates memory for name and data on success. +Must be freed with mem_free(). +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_info_by_id( + ulint dict_id, /*!< in: dict id */ + char** name, /*!< out: dict name */ + ulint* name_len, /*!< out: dict name length*/ + char** data, /*!< out: dict data */ + ulint* data_len, /*!< out: dict data length*/ + trx_t* trx) /*!< in/out: transaction */ +{ + ut_ad(name); + ut_ad(data); + + zip_dict_info_aux rec; + rec.code = zip_dict_info_not_found; + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "id", dict_id); + pars_info_bind_function( + info, "my_func", dict_create_get_zip_dict_info_by_id_aux, + &rec); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "DECLARE FUNCTION my_func;\n" + "DECLARE CURSOR cur IS\n" + " SELECT NAME, DATA FROM SYS_ZIP_DICT\n" + " WHERE ID = :id;\n" + "BEGIN\n" + " OPEN cur;\n" + " FETCH cur INTO my_func();\n" + " CLOSE cur;\n" + "END;\n", + FALSE, trx); + if (error == DB_SUCCESS) { + switch (rec.code) { + case zip_dict_info_success: + *name = rec.name.str; + *name_len = rec.name.length; + *data = rec.data.str; + *data_len = rec.data.length; + break; + case zip_dict_info_not_found: + error = DB_RECORD_NOT_FOUND; + break; + case zip_dict_info_oom: + error = DB_OUT_OF_MEMORY; + break; + case zip_dict_info_corrupted_name: + case 
zip_dict_info_corrupted_data: + error = DB_INVALID_NULL; + break; + default: + ut_error; + } + } + return error; +} + +/** Remove a single compression dictionary from the data dictionary +tables in the database. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_remove_zip_dict( + const char* name, /*!< in: dict name */ + ulint name_len, /*!< in: dict name length */ + trx_t* trx) /*!< in/out: transaction */ +{ + ut_ad(name); + + pars_info_t* info = pars_info_create(); + + ib_uint32_t dict_id_buf; + mach_write_to_4(reinterpret_cast(&dict_id_buf), + ULINT32_UNDEFINED); + ib_uint32_t counter_buf; + mach_write_to_4(reinterpret_cast(&counter_buf), + ULINT32_UNDEFINED); + + pars_info_add_literal(info, "name", name, name_len, + DATA_VARCHAR, DATA_ENGLISH); + pars_info_bind_int4_literal(info, "dict_id", &dict_id_buf); + pars_info_bind_function(info, "find_dict_func", + dict_create_extract_int_aux, &dict_id_buf); + pars_info_bind_function(info, "count_func", + dict_create_extract_int_aux, &counter_buf); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "DECLARE FUNCTION find_dict_func;\n" + "DECLARE FUNCTION count_func;\n" + "DECLARE CURSOR dict_cur IS\n" + " SELECT ID FROM SYS_ZIP_DICT\n" + " WHERE NAME = :name\n" + " FOR UPDATE;\n" + "DECLARE CURSOR ref_cur IS\n" + " SELECT 1 FROM SYS_ZIP_DICT_COLS\n" + " WHERE DICT_ID = :dict_id;\n" + "BEGIN\n" + " OPEN dict_cur;\n" + " FETCH dict_cur INTO find_dict_func();\n" + " IF NOT (SQL % NOTFOUND) THEN\n" + " OPEN ref_cur;\n" + " FETCH ref_cur INTO count_func();\n" + " IF SQL % NOTFOUND THEN\n" + " DELETE FROM SYS_ZIP_DICT WHERE CURRENT OF dict_cur;\n" + " END IF;\n" + " CLOSE ref_cur;\n" + " END IF;\n" + " CLOSE dict_cur;\n" + "END;\n", + FALSE, trx); + if (error == DB_SUCCESS) { + ib_uint32_t local_dict_id = mach_read_from_4( + reinterpret_cast(&dict_id_buf)); + if (local_dict_id == ULINT32_UNDEFINED) { + error = DB_RECORD_NOT_FOUND; + } + else { + ib_uint32_t local_counter = mach_read_from_4( 
+ reinterpret_cast(&counter_buf)); + if (local_counter != ULINT32_UNDEFINED) + error = DB_ROW_IS_REFERENCED; + } + } + return error; +} + +/** Remove all compression dictionary references for the given table ID from +the data dictionary tables in the database. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_remove_zip_dict_references_for_table( + ulint table_id, /*!< in: table id */ + trx_t* trx) /*!< in/out: transaction */ +{ + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "table_id", table_id); + + dberr_t error = que_eval_sql(info, + "PROCEDURE P () IS\n" + "BEGIN\n" + " DELETE FROM SYS_ZIP_DICT_COLS\n" + " WHERE TABLE_ID = :table_id;\n" + "END;\n", + FALSE, trx); + return error; +} diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index f1fbf25c3a61e..57dd6cfa04dff 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -6781,3 +6781,161 @@ dict_tf_to_row_format_string( return(0); } #endif /* !UNIV_HOTBACKUP */ + +/** Insert a records into SYS_ZIP_DICT. 
+@retval DB_SUCCESS if OK +@retval dberr_t if the insert failed */ +UNIV_INTERN +dberr_t +dict_create_zip_dict( + const char* name, /*!< in: zip_dict name */ + ulint name_len, /*!< in: zip_dict name length*/ + const char* data, /*!< in: zip_dict data */ + ulint data_len) /*!< in: zip_dict data length */ +{ + dberr_t err = DB_SUCCESS; + trx_t* trx; + + ut_ad(name); + ut_ad(data); + + rw_lock_x_lock(&dict_operation_lock); + dict_mutex_enter_for_mysql(); + + trx = trx_allocate_for_background(); + trx->op_info = "insert zip_dict"; + trx->dict_operation_lock_mode = RW_X_LATCH; + trx_start_if_not_started(trx); + + err = dict_create_add_zip_dict(name, name_len, data, data_len, trx); + + if (err == DB_SUCCESS) { + trx_commit_for_mysql(trx); + } + else { + trx->op_info = "rollback of internal trx on zip_dict table"; + trx_rollback_to_savepoint(trx, NULL); + ut_a(trx->error_state == DB_SUCCESS); + } + trx->op_info = ""; + trx->dict_operation_lock_mode = 0; + trx_free_for_background(trx); + + dict_mutex_exit_for_mysql(); + rw_lock_x_unlock(&dict_operation_lock); + + return err; +} +/** Get single compression dictionary id for the given +(table id, column pos) pair. 
+@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found */ +UNIV_INTERN +dberr_t +dict_get_dictionary_id_by_key( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint* dict_id) /*!< out: zip_dict id */ +{ + dberr_t err = DB_SUCCESS; + trx_t* trx; + + rw_lock_s_lock(&dict_operation_lock); + dict_mutex_enter_for_mysql(); + + trx = trx_allocate_for_background(); + trx->op_info = "get zip dict id by composite key"; + trx->dict_operation_lock_mode = RW_S_LATCH; + trx_start_if_not_started(trx); + + err = dict_create_get_zip_dict_id_by_reference(table_id, column_pos, + dict_id, trx); + + trx_commit_for_mysql(trx); + trx->dict_operation_lock_mode = 0; + trx_free_for_background(trx); + + dict_mutex_exit_for_mysql(); + rw_lock_s_unlock(&dict_operation_lock); + + return err; +} +/** Get compression dictionary info (name and data) for the given id. +Allocates memory in name->str and data->str on success. +Must be freed with mem_free(). +@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found */ +UNIV_INTERN +dberr_t +dict_get_dictionary_info_by_id( + ulint dict_id, /*!< in: table name */ + char** name, /*!< out: dictionary name */ + ulint* name_len, /*!< out: dictionary name length*/ + char** data, /*!< out: dictionary data */ + ulint* data_len) /*!< out: dictionary data length*/ +{ + dberr_t err = DB_SUCCESS; + trx_t* trx; + + rw_lock_s_lock(&dict_operation_lock); + dict_mutex_enter_for_mysql(); + + trx = trx_allocate_for_background(); + trx->op_info = "get zip dict name and data by id"; + trx->dict_operation_lock_mode = RW_S_LATCH; + trx_start_if_not_started(trx); + + err = dict_create_get_zip_dict_info_by_id(dict_id, name, name_len, + data, data_len, trx); + + trx_commit_for_mysql(trx); + trx->dict_operation_lock_mode = 0; + trx_free_for_background(trx); + + dict_mutex_exit_for_mysql(); + rw_lock_s_unlock(&dict_operation_lock); + + return err; +} +/** Delete a record in SYS_ZIP_DICT with the given name. 
+@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found +@retval DB_ROW_IS_REFERENCED if in use */ +UNIV_INTERN +dberr_t +dict_drop_zip_dict( + const char* name, /*!< in: zip_dict name */ + ulint name_len) /*!< in: zip_dict name length*/ +{ + dberr_t err = DB_SUCCESS; + trx_t* trx; + + ut_ad(name); + + rw_lock_x_lock(&dict_operation_lock); + dict_mutex_enter_for_mysql(); + + trx = trx_allocate_for_background(); + trx->op_info = "delete zip_dict"; + trx->dict_operation_lock_mode = RW_X_LATCH; + trx_start_if_not_started(trx); + + err = dict_create_remove_zip_dict(name, name_len, trx); + + if (err == DB_SUCCESS) { + trx_commit_for_mysql(trx); + } + else { + trx->op_info = "rollback of internal trx on zip_dict table"; + trx_rollback_to_savepoint(trx, NULL); + ut_a(trx->error_state == DB_SUCCESS); + } + trx->op_info = ""; + trx->dict_operation_lock_mode = 0; + trx_free_for_background(trx); + + dict_mutex_exit_for_mysql(); + rw_lock_x_unlock(&dict_operation_lock); + + return err; +} diff --git a/storage/xtradb/dict/dict0load.cc b/storage/xtradb/dict/dict0load.cc index 988351dbca55a..db2aa3239f5ab 100644 --- a/storage/xtradb/dict/dict0load.cc +++ b/storage/xtradb/dict/dict0load.cc @@ -56,7 +56,9 @@ static const char* SYSTEM_TABLE_NAME[] = { "SYS_FOREIGN", "SYS_FOREIGN_COLS", "SYS_TABLESPACES", - "SYS_DATAFILES" + "SYS_DATAFILES", + "SYS_ZIP_DICT", + "SYS_ZIP_DICT_COLS" }; /* If this flag is TRUE, then we will load the cluster index's (and tables') @@ -728,6 +730,161 @@ dict_process_sys_datafiles( return(NULL); } +/** This function parses a SYS_ZIP_DICT record, extracts necessary +information from the record and returns to caller. 
+@return error message, or NULL on success */ +UNIV_INTERN +const char* +dict_process_sys_zip_dict( + mem_heap_t* heap, /*!< in/out: heap memory */ + ulint zip_size, /*!< in: nonzero=compressed BLOB page size */ + const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */ + ulint* id, /*!< out: dict id */ + const char** name, /*!< out: dict name */ + const char** data, /*!< out: dict data */ + ulint* data_len) /*!< out: dict data length */ +{ + ulint len; + const byte* field; + + /* Initialize the output values */ + *id = ULINT_UNDEFINED; + *name = NULL; + *data = NULL; + *data_len = 0; + + if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) { + return("delete-marked record in SYS_ZIP_DICT"); + } + + if (UNIV_UNLIKELY( + rec_get_n_fields_old(rec)!= DICT_NUM_FIELDS__SYS_ZIP_DICT)) { + return("wrong number of columns in SYS_ZIP_DICT record"); + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT__ID, &len); + if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) { + goto err_len; + } + *id = mach_read_from_4(field); + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID, &len); + if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { + goto err_len; + } + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR, &len); + if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { + goto err_len; + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT__NAME, &len); + if (UNIV_UNLIKELY(len == 0 || len == UNIV_SQL_NULL)) { + goto err_len; + } + *name = mem_heap_strdupl(heap, (char*) field, len); + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT__DATA, &len); + if (UNIV_UNLIKELY(len == UNIV_SQL_NULL)) { + goto err_len; + } + + if (rec_get_1byte_offs_flag(rec) == 0 && + rec_2_is_field_extern(rec, DICT_FLD__SYS_ZIP_DICT__DATA)) { + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + + if (UNIV_UNLIKELY + (!memcmp(field + len - BTR_EXTERN_FIELD_REF_SIZE, + field_ref_zero, + 
BTR_EXTERN_FIELD_REF_SIZE))) { + goto err_len; + } + *data = reinterpret_cast( + btr_copy_externally_stored_field(data_len, field, + zip_size, len, heap)); + } + else { + *data_len = len; + *data = static_cast(mem_heap_dup(heap, field, len)); + } + + return(NULL); + +err_len: + return("incorrect column length in SYS_ZIP_DICT"); +} + +/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary +information from the record and returns to caller. +@return error message, or NULL on success */ +UNIV_INTERN +const char* +dict_process_sys_zip_dict_cols( + mem_heap_t* heap, /*!< in/out: heap memory */ + const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */ + ulint* table_id, /*!< out: table id */ + ulint* column_pos, /*!< out: column position */ + ulint* dict_id) /*!< out: dict id */ +{ + ulint len; + const byte* field; + + /* Initialize the output values */ + *table_id = ULINT_UNDEFINED; + *column_pos = ULINT_UNDEFINED; + *dict_id = ULINT_UNDEFINED; + + if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) { + return("delete-marked record in SYS_ZIP_DICT_COLS"); + } + + if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) != + DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS)) { + return("wrong number of columns in SYS_ZIP_DICT_COLS" + " record"); + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID, &len); + if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) { +err_len: + return("incorrect column length in SYS_ZIP_DICT_COLS"); + } + *table_id = mach_read_from_4(field); + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS, &len); + if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) { + goto err_len; + } + *column_pos = mach_read_from_4(field); + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID, &len); + if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) { + goto err_len; + } + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR, &len); + if (UNIV_UNLIKELY(len != 
DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) { + goto err_len; + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID, &len); + if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) { + goto err_len; + } + *dict_id = mach_read_from_4(field); + + return(NULL); +} /********************************************************************//** Determine the flags of a table as stored in SYS_TABLES.TYPE and N_COLS. @return ULINT_UNDEFINED if error, else a valid dict_table_t::flags. */ diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index c1dbb5f91b996..57e415ae939fe 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -489,6 +489,8 @@ fil_space_get_by_id( ut_ad(space->magic_n == FIL_SPACE_MAGIC_N), space->id == id); + /* The system tablespace must always be found */ + ut_ad(space || id != 0 || srv_is_being_started); return(space); } diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc index 25059db96b078..a0f0fab5566b0 100644 --- a/storage/xtradb/fts/fts0fts.cc +++ b/storage/xtradb/fts/fts0fts.cc @@ -108,6 +108,7 @@ UNIV_INTERN mysql_pfs_key_t fts_pll_tokenize_mutex_key; /** variable to record innodb_fts_internal_tbl_name for information schema table INNODB_FTS_INSERTED etc. 
*/ UNIV_INTERN char* fts_internal_tbl_name = NULL; +UNIV_INTERN char* fts_internal_tbl_name2 = NULL; /** InnoDB default stopword list: There are different versions of stopwords, the stop words listed @@ -6569,6 +6570,36 @@ fts_check_corrupt_index( return(0); } +/* Get parent table name if it's a fts aux table +@param[in] aux_table_name aux table name +@param[in] aux_table_len aux table length +@return parent table name, or NULL */ +char* +fts_get_parent_table_name( + const char* aux_table_name, + ulint aux_table_len) +{ + fts_aux_table_t aux_table; + char* parent_table_name = NULL; + + if (fts_is_aux_table_name(&aux_table, aux_table_name, aux_table_len)) { + dict_table_t* parent_table; + + parent_table = dict_table_open_on_id( + aux_table.parent_id, TRUE, DICT_TABLE_OP_NORMAL); + + if (parent_table != NULL) { + parent_table_name = mem_strdupl( + parent_table->name, + strlen(parent_table->name)); + + dict_table_close(parent_table, TRUE, FALSE); + } + } + + return(parent_table_name); +} + /** Check the validity of the parent table. @param[in] aux_table auxiliary table @return true if it is a valid table or false if it is not */ diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index f00d11bd870c4..58d638d0b0c30 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1361,6 +1361,29 @@ normalize_table_name_low( ibool set_lower_case); /* in: TRUE if we want to set name to lower case */ +/** Creates a new compression dictionary. */ +static +handler_create_zip_dict_result +innobase_create_zip_dict( + handlerton* hton, /*!< in: innobase handlerton */ + THD* thd, /*!< in: handle to the MySQL thread */ + const char* name, /*!< in: zip dictionary name */ + ulint* name_len, + /*!< in/out: zip dictionary name length */ + const char* data, /*!< in: zip dictionary data */ + ulint* data_len); + /*!< in/out: zip dictionary data length */ + +/** Drops a existing compression dictionary. 
*/ +static +handler_drop_zip_dict_result +innobase_drop_zip_dict( + handlerton* hton, /*!< in: innobase handlerton */ + THD* thd, /*!< in: handle to the MySQL thread */ + const char* name, /*!< in: zip dictionary name */ + ulint* name_len); + /*!< in/out: zip dictionary name length */ + /*************************************************************//** Checks if buffer pool is big enough to enable backoff algorithm. InnoDB empty free list algorithm backoff requires free pages @@ -3422,6 +3445,9 @@ innobase_init( innobase_hton->kill_connection = innobase_kill_connection; + innobase_hton->create_zip_dict = innobase_create_zip_dict; + innobase_hton->drop_zip_dict = innobase_drop_zip_dict; + ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); #ifndef DBUG_OFF @@ -4100,6 +4126,89 @@ innobase_purge_changed_page_bitmaps( return (my_bool)log_online_purge_changed_page_bitmaps(lsn); } +/** Creates a new compression dictionary. */ +static +handler_create_zip_dict_result +innobase_create_zip_dict( + handlerton* hton, /*!< in: innobase handlerton */ + THD* thd, /*!< in: handle to the MySQL thread */ + const char* name, /*!< in: zip dictionary name */ + ulint* name_len, + /*!< in/out: zip dictionary name length */ + const char* data, /*!< in: zip dictionary data */ + ulint* data_len) + /*!< in/out: zip dictionary data length */ +{ + handler_create_zip_dict_result result = + HA_CREATE_ZIP_DICT_UNKNOWN_ERROR; + + DBUG_ENTER("innobase_create_zip_dict"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + if (UNIV_UNLIKELY(high_level_read_only)) { + DBUG_RETURN(HA_CREATE_ZIP_DICT_READ_ONLY); + } + + if (UNIV_UNLIKELY(*name_len > ZIP_DICT_MAX_NAME_LENGTH)) { + *name_len = ZIP_DICT_MAX_NAME_LENGTH; + DBUG_RETURN(HA_CREATE_ZIP_DICT_NAME_TOO_LONG); + } + + if (UNIV_UNLIKELY(*data_len > ZIP_DICT_MAX_DATA_LENGTH)) { + *data_len = ZIP_DICT_MAX_DATA_LENGTH; + DBUG_RETURN(HA_CREATE_ZIP_DICT_DATA_TOO_LONG); + } + + switch (dict_create_zip_dict(name, *name_len, data, *data_len)) { + case 
DB_SUCCESS: + result = HA_CREATE_ZIP_DICT_OK; + break; + case DB_DUPLICATE_KEY: + result = HA_CREATE_ZIP_DICT_ALREADY_EXISTS; + break; + default: + ut_ad(0); + result = HA_CREATE_ZIP_DICT_UNKNOWN_ERROR; + } + DBUG_RETURN(result); +} + +/** Drops a existing compression dictionary. */ +static +handler_drop_zip_dict_result +innobase_drop_zip_dict( + handlerton* hton, /*!< in: innobase handlerton */ + THD* thd, /*!< in: handle to the MySQL thread */ + const char* name, /*!< in: zip dictionary name */ + ulint* name_len) + /*!< in/out: zip dictionary name length */ +{ + handler_drop_zip_dict_result result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR; + + DBUG_ENTER("innobase_drop_zip_dict"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + if (UNIV_UNLIKELY(high_level_read_only)) { + DBUG_RETURN(HA_DROP_ZIP_DICT_READ_ONLY); + } + + switch (dict_drop_zip_dict(name, *name_len)) { + case DB_SUCCESS: + result = HA_DROP_ZIP_DICT_OK; + break; + case DB_RECORD_NOT_FOUND: + result = HA_DROP_ZIP_DICT_DOES_NOT_EXIST; + break; + case DB_ROW_IS_REFERENCED: + result = HA_DROP_ZIP_DICT_IS_REFERENCED; + break; + default: + ut_ad(0); + result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR; + } + DBUG_RETURN(result); +} + /*****************************************************************//** Check whether this is a fake change transaction. @return TRUE if a fake change transaction */ @@ -5460,6 +5569,86 @@ innobase_build_index_translation( DBUG_RETURN(ret); } +/** This function checks if all the compression dictionaries referenced +in table->fields exist in SYS_ZIP_DICT InnoDB system table. +@return true if all referenced dictionaries exist */ +UNIV_INTERN +bool +innobase_check_zip_dicts( + const TABLE* table, /*!< in: table in MySQL data + dictionary */ + ulint* dict_ids, /*!< out: identified zip dict ids + (at least n_fields long) */ + trx_t* trx, /*!< in: transaction */ + const char** err_dict_name) /*!< out: the name of the + zip_dict which does not exist. 
*/ +{ + DBUG_ENTER("innobase_check_zip_dicts"); + + bool res = true; + dberr_t err = DB_SUCCESS; + const size_t n_fields = table->s->fields; + + Field* field_ptr; + for (size_t field_idx = 0; err == DB_SUCCESS && field_idx < n_fields; + ++field_idx) + { + field_ptr = table->field[field_idx]; + if (field_ptr->has_associated_compression_dictionary()) { + err = dict_create_get_zip_dict_id_by_name( + field_ptr->zip_dict_name.str, + field_ptr->zip_dict_name.length, + &dict_ids[field_idx], + trx); + ut_a(err == DB_SUCCESS || err == DB_RECORD_NOT_FOUND); + } + else { + dict_ids[field_idx] = ULINT_UNDEFINED; + } + + } + + if (err != DB_SUCCESS) { + res = false; + *err_dict_name = field_ptr->zip_dict_name.str; + } + + DBUG_RETURN(res); +} + +/** This function creates compression dictionary references in +SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info +in table->fields and provided zip dict ids. */ +UNIV_INTERN +void +innobase_create_zip_dict_references( + const TABLE* table, /*!< in: table in MySQL data + dictionary */ + table_id_t ib_table_id, /*!< in: table ID in Innodb data + dictionary */ + ulint* zip_dict_ids, /*!< in: zip dict ids + (at least n_fields long) */ + trx_t* trx) /*!< in: transaction */ +{ + DBUG_ENTER("innobase_create_zip_dict_references"); + + dberr_t err = DB_SUCCESS; + const size_t n_fields = table->s->fields; + + for (size_t field_idx = 0; err == DB_SUCCESS && field_idx < n_fields; + ++field_idx) + { + if (zip_dict_ids[field_idx] != ULINT_UNDEFINED) { + err = dict_create_add_zip_dict_reference(ib_table_id, + table->field[field_idx]->field_index, + zip_dict_ids[field_idx], trx); + ut_a(err == DB_SUCCESS); + } + } + + DBUG_VOID_RETURN; +} + /*******************************************************************//** This function uses index translation table to quickly locate the requested index structure. 
@@ -6749,7 +6938,12 @@ ha_innobase::store_key_val_for_row( blob_data = row_mysql_read_blob_ref(&blob_len, (byte*) (record + (ulint) get_field_offset(table, field)), - (ulint) field->pack_length()); + (ulint) field->pack_length(), + field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED, + reinterpret_cast( + field->zip_dict_data.str), + field->zip_dict_data.length, prebuilt); true_len = blob_len; @@ -7004,6 +7198,9 @@ build_template_field( templ->mbminlen = dict_col_get_mbminlen(col); templ->mbmaxlen = dict_col_get_mbmaxlen(col); templ->is_unsigned = col->prtype & DATA_UNSIGNED; + templ->compressed = (field->column_format() + == COLUMN_FORMAT_TYPE_COMPRESSED); + templ->zip_dict_data = field->zip_dict_data; if (!dict_index_is_clust(index) && templ->rec_field_no == ULINT_UNDEFINED) { @@ -7761,8 +7958,11 @@ calc_row_difference( switch (col_type) { case DATA_BLOB: - o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); - n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); + /* Do not compress blob column while comparing*/ + o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len, + false, 0, 0, prebuilt); + n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len, + false, 0, 0, prebuilt); break; @@ -7832,7 +8032,13 @@ calc_row_difference( TRUE, new_mysql_row_col, col_pack_len, - dict_table_is_comp(prebuilt->table)); + dict_table_is_comp(prebuilt->table), + field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED, + reinterpret_cast( + field->zip_dict_data.str), + field->zip_dict_data.length, + prebuilt); dfield_copy(&ufield->new_val, &dfield); } else { dfield_set_null(&ufield->new_val); @@ -9503,6 +9709,7 @@ create_table_def( ulint unsigned_type; ulint binary_type; ulint long_true_varchar; + ulint compressed; ulint charset_no; ulint i; ulint doc_id_col = 0; @@ -9649,6 +9856,13 @@ create_table_def( } } + /* Check if the the field has COMPRESSED attribute */ + compressed = 0; + if (field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED) { + compressed = 
DATA_COMPRESSED; + } + /* First check whether the column to be added has a system reserved name. */ if (dict_col_name_is_reserved(field->field_name)){ @@ -9669,7 +9883,8 @@ create_table_def( dtype_form_prtype( (ulint) field->type() | nulls_allowed | unsigned_type - | binary_type | long_true_varchar, + | binary_type | long_true_varchar + | compressed, charset_no), col_len); } @@ -10505,6 +10720,10 @@ ha_innobase::create( const char* stmt; size_t stmt_len; + mem_heap_t* heap = 0; + ulint* zip_dict_ids = 0; + const char* err_zip_dict_name = 0; + DBUG_ENTER("ha_innobase::create"); DBUG_ASSERT(thd != NULL); @@ -10595,6 +10814,18 @@ ha_innobase::create( row_mysql_lock_data_dictionary(trx); + heap = mem_heap_create(form->s->fields * sizeof(ulint)); + zip_dict_ids = static_cast( + mem_heap_alloc(heap, form->s->fields * sizeof(ulint))); + + if (!innobase_check_zip_dicts(form, zip_dict_ids, + trx, &err_zip_dict_name)) { + error = -1; + my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST, + MYF(0), err_zip_dict_name); + goto cleanup; + } + error = create_table_def(trx, form, norm_name, temp_path, remote_path, flags, flags2); if (error) { @@ -10702,6 +10933,22 @@ ha_innobase::create( dict_table_get_all_fts_indexes(innobase_table, fts->indexes); } + /* + Adding compression dictionary <-> compressed table column links + to the SYS_ZIP_DICT_COLS table. 
+ */ + ut_a(zip_dict_ids != 0); + { + dict_table_t* local_table = dict_table_open_on_name( + norm_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE); + + ut_a(local_table); + table_id_t table_id = local_table->id; + dict_table_close(local_table, TRUE, FALSE); + innobase_create_zip_dict_references(form, + table_id, zip_dict_ids, trx); + } + stmt = innobase_get_stmt(thd, &stmt_len); if (stmt) { @@ -10818,6 +11065,9 @@ ha_innobase::create( trx_free_for_mysql(trx); + if (heap != 0) + mem_heap_free(heap); + DBUG_RETURN(0); cleanup: @@ -10827,6 +11077,9 @@ ha_innobase::create( trx_free_for_mysql(trx); + if (heap != 0) + mem_heap_free(heap); + DBUG_RETURN(error); } @@ -11904,6 +12157,14 @@ ha_innobase::info_low( if (dict_stats_is_persistent_enabled(ib_table)) { if (is_analyze) { + + /* If this table is already queued for + background analyze, remove it from the + queue as we are about to do the same */ + dict_mutex_enter_for_mysql(); + dict_stats_recalc_pool_del(ib_table); + dict_mutex_exit_for_mysql(); + opt = DICT_STATS_RECALC_PERSISTENT; } else { /* This is e.g. 
'SHOW INDEXES', fetch @@ -13050,6 +13311,11 @@ ha_innobase::extra( if (prebuilt->blob_heap) { row_mysql_prebuilt_free_blob_heap(prebuilt); } + + if (prebuilt->compress_heap) { + row_mysql_prebuilt_free_compress_heap(prebuilt); + } + break; case HA_EXTRA_RESET_STATE: reset_template(); @@ -13101,6 +13367,10 @@ ha_innobase::reset() row_mysql_prebuilt_free_blob_heap(prebuilt); } + if (prebuilt->compress_heap) { + row_mysql_prebuilt_free_compress_heap(prebuilt); + } + reset_template(); ds_mrr.reset(); @@ -13300,7 +13570,11 @@ ha_innobase::external_lock( && lock_type == F_WRLCK) || thd_sql_command(thd) == SQLCOM_CREATE_INDEX || thd_sql_command(thd) == SQLCOM_DROP_INDEX - || thd_sql_command(thd) == SQLCOM_DELETE)) { + || thd_sql_command(thd) == SQLCOM_DELETE + || thd_sql_command(thd) == + SQLCOM_CREATE_COMPRESSION_DICTIONARY + || thd_sql_command(thd) == + SQLCOM_DROP_COMPRESSION_DICTIONARY)) { if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE) { @@ -14062,7 +14336,9 @@ ha_innobase::store_lock( && lock_type <= TL_WRITE)) || sql_command == SQLCOM_CREATE_INDEX || sql_command == SQLCOM_DROP_INDEX - || sql_command == SQLCOM_DELETE)) { + || sql_command == SQLCOM_DELETE + || sql_command == SQLCOM_CREATE_COMPRESSION_DICTIONARY + || sql_command == SQLCOM_DROP_COMPRESSION_DICTIONARY)) { ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); @@ -15001,6 +15277,82 @@ ha_innobase::check_if_incompatible_data( return(COMPATIBLE_DATA_YES); } +/** This function reads zip dict-related info from SYS_ZIP_DICT +and SYS_ZIP_DICT_COLS for all columns marked with +COLUMN_FORMAT_TYPE_COMPRESSED flag and updates +zip_dict_name / zip_dict_data for those which have associated +compression dictionaries. 
+*/ +UNIV_INTERN +void +ha_innobase::update_field_defs_with_zip_dict_info() +{ + DBUG_ENTER("update_field_defs_with_zip_dict_info"); + ut_ad(!mutex_own(&dict_sys->mutex)); + + char norm_name[FN_REFLEN]; + normalize_table_name(norm_name, table_share->normalized_path.str); + + dict_table_t* ib_table = dict_table_open_on_name( + norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); + + /* if dict_table_open_on_name() returns NULL, then it means that + TABLE_SHARE is populated for a table being created and we can + skip filling zip dict info here */ + if (ib_table == 0) + DBUG_VOID_RETURN; + + table_id_t ib_table_id = ib_table->id; + dict_table_close(ib_table, FALSE, FALSE); + Field* field; + for (uint i = 0; i < table_share->fields; ++i) { + field = table_share->field[i]; + if (field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED) { + bool reference_found = false; + ulint dict_id = 0; + switch (dict_get_dictionary_id_by_key(ib_table_id, i, + &dict_id)) { + case DB_SUCCESS: + reference_found = true; + break; + case DB_RECORD_NOT_FOUND: + reference_found = false; + break; + default: + ut_error; + } + if (reference_found) { + char* local_name = 0; + ulint local_name_len = 0; + char* local_data = 0; + ulint local_data_len = 0; + if (dict_get_dictionary_info_by_id(dict_id, + &local_name, &local_name_len, + &local_data, &local_data_len) != + DB_SUCCESS) { + ut_error; + } + else { + field->zip_dict_name.str = + local_name; + field->zip_dict_name.length = + local_name_len; + field->zip_dict_data.str = + local_data; + field->zip_dict_data.length = + local_data_len; + } + } + else { + field->zip_dict_name = null_lex_cstr; + field->zip_dict_data = null_lex_cstr; + } + } + } + DBUG_VOID_RETURN; +} + /****************************************************************//** Update the system variable innodb_io_capacity_max using the "saved" value. This function is registered as a callback with MySQL. 
*/ @@ -15555,7 +15907,12 @@ innodb_internal_table_update( my_free(old); } - fts_internal_tbl_name = *(char**) var_ptr; + fts_internal_tbl_name2 = *(char**) var_ptr; + if (fts_internal_tbl_name2 == NULL) { + fts_internal_tbl_name = const_cast("default"); + } else { + fts_internal_tbl_name = fts_internal_tbl_name2; + } } /****************************************************************//** @@ -17888,7 +18245,7 @@ static MYSQL_SYSVAR_BOOL(disable_sort_file_cache, srv_disable_sort_file_cache, "Whether to disable OS system file cache for sort I/O", NULL, NULL, FALSE); -static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name, +static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name2, PLUGIN_VAR_NOCMDARG, "FTS internal auxiliary table to be checked", innodb_internal_table_validate, @@ -18340,6 +18697,19 @@ static MYSQL_SYSVAR_BOOL(locking_fake_changes, srv_fake_changes_locks, "not take any locks at all.", NULL, NULL, TRUE); +static MYSQL_SYSVAR_UINT(compressed_columns_zip_level, + srv_compressed_columns_zip_level, + PLUGIN_VAR_RQCMDARG, + "Compression level used for compressed columns. 0 is no compression" + ", 1 is fastest and 9 is best compression. Default is 6.", + NULL, NULL, DEFAULT_COMPRESSION_LEVEL, 0, 9, 0); + +static MYSQL_SYSVAR_ULONG(compressed_columns_threshold, + srv_compressed_columns_threshold, + PLUGIN_VAR_RQCMDARG, + "Compress column data if its length exceeds this value. 
Default is 96", + NULL, NULL, 96, 1, ~0UL, 0); + static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(log_block_size), MYSQL_SYSVAR(additional_mem_pool_size), @@ -18537,6 +18907,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(fake_changes), MYSQL_SYSVAR(locking_fake_changes), MYSQL_SYSVAR(tmpdir), + MYSQL_SYSVAR(compressed_columns_zip_level), + MYSQL_SYSVAR(compressed_columns_threshold), NULL }; @@ -18559,6 +18931,8 @@ mysql_declare_plugin(innobase) i_s_xtradb_read_view, i_s_xtradb_internal_hash_tables, i_s_xtradb_rseg, +i_s_xtradb_zip_dict, +i_s_xtradb_zip_dict_cols, i_s_innodb_trx, i_s_innodb_locks, i_s_innodb_lock_waits, diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index c9f9cfabc1f71..609787bd6a1a7 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -287,6 +287,15 @@ class ha_innobase: public handler /** @} */ bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); + + /** This function reads zip dict-related info from SYS_ZIP_DICT + and SYS_ZIP_DICT_COLS for all columns marked with + COLUMN_FORMAT_TYPE_COMPRESSED flag and updates + zip_dict_name / zip_dict_data for those which have associated + compression dictionaries. + */ + virtual void update_field_defs_with_zip_dict_info(); + private: /** Builds a 'template' to the prebuilt struct. @@ -665,3 +674,31 @@ innobase_build_index_translation( INNOBASE_SHARE* share); /*!< in/out: share structure where index translation table will be constructed in. */ + +/** This function checks if all the compression dictionaries referenced +in table->fields exist in SYS_ZIP_DICT InnoDB system table. 
+@return true if all referenced dictionaries exist */ +UNIV_INTERN +bool +innobase_check_zip_dicts( + const TABLE* table, /*!< in: table in MySQL data + dictionary */ + ulint* dict_ids, /*!< out: identified zip dict ids + (at least n_fields long) */ + trx_t* trx, /*!< in: transaction */ + const char** err_dict_name); /*!< out: the name of the + zip_dict which does not exist. */ + +/** This function creates compression dictionary references in +SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info +in table->fields and provided zip dict ids. */ +UNIV_INTERN +void +innobase_create_zip_dict_references( + const TABLE* table, /*!< in: table in MySQL data + dictionary */ + table_id_t ib_table_id, /*!< in: table ID in Innodb data + dictionary */ + ulint* zip_dict_ids, /*!< in: zip dict ids + (at least n_fields long) */ + trx_t* trx); /*!< in: transaction */ diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 1a39f70614d62..291ed06a9559d 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -201,7 +201,10 @@ innobase_need_rebuild( /*==================*/ const Alter_inplace_info* ha_alter_info) { - if (ha_alter_info->handler_flags + Alter_inplace_info::HA_ALTER_FLAGS alter_inplace_flags = + ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE); + + if (alter_inplace_flags == Alter_inplace_info::CHANGE_CREATE_OPTION && !(ha_alter_info->create_info->used_fields & (HA_CREATE_USED_ROW_FORMAT @@ -1069,6 +1072,15 @@ innobase_col_to_mysql( field->reset(); if (field->type() == MYSQL_TYPE_VARCHAR) { + if (field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED) { + /* Skip compressed varchar column when + reporting an erroneous row + during index creation or table rebuild. */ + field->set_null(); + break; + } + /* This is a >= 5.0.3 type true VARCHAR. Store the length of the data to the first byte or the first two bytes of dest. 
*/ @@ -2328,7 +2340,8 @@ innobase_build_col_map_add( mem_heap_t* heap, dfield_t* dfield, const Field* field, - ulint comp) + ulint comp, + row_prebuilt_t* prebuilt) { if (field->is_real_null()) { dfield_set_null(dfield); @@ -2340,7 +2353,10 @@ innobase_build_col_map_add( byte* buf = static_cast(mem_heap_alloc(heap, size)); row_mysql_store_col_in_innobase_format( - dfield, buf, TRUE, field->ptr, size, comp); + dfield, buf, TRUE, field->ptr, size, comp, + field->column_format() == COLUMN_FORMAT_TYPE_COMPRESSED, + reinterpret_cast(field->zip_dict_data.str), + field->zip_dict_data.length, prebuilt); } /** Construct the translation table for reordering, dropping or @@ -2365,7 +2381,8 @@ innobase_build_col_map( const dict_table_t* new_table, const dict_table_t* old_table, dtuple_t* add_cols, - mem_heap_t* heap) + mem_heap_t* heap, + row_prebuilt_t* prebuilt) { DBUG_ENTER("innobase_build_col_map"); DBUG_ASSERT(altered_table != table); @@ -2404,7 +2421,7 @@ innobase_build_col_map( innobase_build_col_map_add( heap, dtuple_get_nth_field(add_cols, i), altered_table->field[i], - dict_table_is_comp(new_table)); + dict_table_is_comp(new_table), prebuilt); found_col: i++; } @@ -2567,7 +2584,8 @@ prepare_inplace_alter_table_dict( ulint flags2, ulint fts_doc_id_col, bool add_fts_doc_id, - bool add_fts_doc_id_idx) + bool add_fts_doc_id_idx, + row_prebuilt_t* prebuilt) { bool dict_locked = false; ulint* add_key_nums; /* MySQL key numbers */ @@ -2578,6 +2596,7 @@ prepare_inplace_alter_table_dict( dberr_t error; ulint num_fts_index; ha_innobase_inplace_ctx*ctx; + ulint* zip_dict_ids = 0; DBUG_ENTER("prepare_inplace_alter_table_dict"); @@ -2712,6 +2731,18 @@ prepare_inplace_alter_table_dict( ctx->new_table->id); ulint n_cols; dtuple_t* add_cols; + const char* err_zip_dict_name = 0; + + zip_dict_ids = static_cast( + mem_heap_alloc(ctx->heap, + altered_table->s->fields * sizeof(ulint))); + + if (!innobase_check_zip_dicts(altered_table, zip_dict_ids, + ctx->trx, &err_zip_dict_name)) { + 
my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST, + MYF(0), err_zip_dict_name); + goto new_clustered_failed; + } if (innobase_check_foreigns( ha_alter_info, altered_table, old_table, @@ -2815,6 +2846,12 @@ prepare_inplace_alter_table_dict( } } + if (field->column_format() == + COLUMN_FORMAT_TYPE_COMPRESSED) { + field_type |= DATA_COMPRESSED; + } + + if (dict_col_name_is_reserved(field->field_name)) { dict_mem_table_free(ctx->new_table); my_error(ER_WRONG_COLUMN_NAME, MYF(0), @@ -2894,7 +2931,7 @@ prepare_inplace_alter_table_dict( ctx->col_map = innobase_build_col_map( ha_alter_info, altered_table, old_table, ctx->new_table, user_table, - add_cols, ctx->heap); + add_cols, ctx->heap, prebuilt); ctx->add_cols = add_cols; } else { DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info)); @@ -3072,6 +3109,15 @@ prepare_inplace_alter_table_dict( DBUG_ASSERT(error == DB_SUCCESS); + /* + Adding compression dictionary <-> compressed table column links + to the SYS_ZIP_DICT_COLS table. + */ + if (zip_dict_ids != 0) { + innobase_create_zip_dict_references(altered_table, + ctx->trx->table_id, zip_dict_ids, ctx->trx); + } + /* Commit the data dictionary transaction in order to release the table locks on the system tables. 
This means that if MySQL crashes while creating a new primary key inside @@ -3767,7 +3813,7 @@ ha_innobase::prepare_inplace_alter_table( } if (!(ha_alter_info->handler_flags & INNOBASE_ALTER_DATA) - || (ha_alter_info->handler_flags + || ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) == Alter_inplace_info::CHANGE_CREATE_OPTION && !innobase_need_rebuild(ha_alter_info))) { @@ -3893,7 +3939,7 @@ ha_innobase::prepare_inplace_alter_table( table_share->table_name.str, flags, flags2, fts_doc_col_no, add_fts_doc_id, - add_fts_doc_id_idx)); + add_fts_doc_id_idx, prebuilt)); } /** Alter the table structure in-place with operations @@ -3933,7 +3979,7 @@ ha_innobase::inplace_alter_table( DBUG_RETURN(false); } - if (ha_alter_info->handler_flags + if ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) == Alter_inplace_info::CHANGE_CREATE_OPTION && !innobase_need_rebuild(ha_alter_info)) { goto ok_exit; diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc index dfdad55ec3bef..b351e464a1eac 100644 --- a/storage/xtradb/handler/i_s.cc +++ b/storage/xtradb/handler/i_s.cc @@ -4050,6 +4050,8 @@ i_s_fts_config_fill( DBUG_RETURN(0); } + DEBUG_SYNC_C("i_s_fts_config_fille_check"); + fields = table->field; /* Prevent DDL to drop fts aux tables. 
*/ diff --git a/storage/xtradb/handler/xtradb_i_s.cc b/storage/xtradb/handler/xtradb_i_s.cc index 213e3c1aa53d9..9176378447608 100644 --- a/storage/xtradb/handler/xtradb_i_s.cc +++ b/storage/xtradb/handler/xtradb_i_s.cc @@ -32,9 +32,11 @@ this program; if not, write to the Free Software Foundation, Inc., #include #include #include "srv0start.h" /* for srv_was_started */ +#include /* btr_pcur_t */ #include /* btr_search_sys */ #include /* recv_sys */ #include +#include /* for ZIP_DICT_MAX_* constants */ /* for XTRADB_RSEG table */ #include "trx0trx.h" /* for TRX_QUE_STATE_STR_MAX_LEN */ @@ -130,6 +132,28 @@ field_store_string( return(ret); } +/** Auxiliary function to store (char*, len) value in MYSQL_TYPE_BLOB +field. +@return 0 on success */ +static +int +field_store_blob( + Field* field, /*!< in/out: target field for storage */ + const char* data, /*!< in: pointer to data, or NULL */ + uint data_len) /*!< in: data length */ +{ + int ret; + + if (data != NULL) { + ret = field->store(data, data_len, system_charset_info); + field->set_notnull(); + } else { + ret = 0; /* success */ + field->set_null(); + } + + return(ret); +} static int @@ -603,3 +627,329 @@ UNIV_INTERN struct st_mysql_plugin i_s_xtradb_rseg = STRUCT_FLD(__reserved1, NULL), STRUCT_FLD(flags, 0UL), }; + + +/************************************************************************/ +enum zip_dict_field_type +{ + zip_dict_field_id, + zip_dict_field_name, + zip_dict_field_zip_dict +}; + +static ST_FIELD_INFO xtradb_sys_zip_dict_fields_info[] = +{ + { STRUCT_FLD(field_name, "id"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + { STRUCT_FLD(field_name, "name"), + STRUCT_FLD(field_length, ZIP_DICT_MAX_NAME_LENGTH), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, 
0), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + { STRUCT_FLD(field_name, "zip_dict"), + STRUCT_FLD(field_length, ZIP_DICT_MAX_DATA_LENGTH), + STRUCT_FLD(field_type, MYSQL_TYPE_BLOB), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, 0), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + END_OF_ST_FIELD_INFO +}; + +/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT with information +collected by scanning SYS_ZIP_DICT table. +@return 0 on success */ +static +int +xtradb_i_s_dict_fill_sys_zip_dict( + THD* thd, /*!< in: thread */ + ulint id, /*!< in: dict ID */ + const char* name, /*!< in: dict name */ + const char* data, /*!< in: dict data */ + ulint data_len, /*!< in: dict data length */ + TABLE* table_to_fill) /*!< in/out: fill this table */ +{ + DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict"); + + Field** fields = table_to_fill->field; + + OK(field_store_ulint(fields[zip_dict_field_id], id)); + OK(field_store_string(fields[zip_dict_field_name], name)); + OK(field_store_blob(fields[zip_dict_field_zip_dict], data, + data_len)); + + OK(schema_table_store_record(thd, table_to_fill)); + + DBUG_RETURN(0); +} + +/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT table. +Loop through each record in SYS_ZIP_DICT, and extract the column +information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT table. 
+@return 0 on success */ +static +int +xtradb_i_s_sys_zip_dict_fill_table( + THD* thd, /*!< in: thread */ + TABLE_LIST* tables, /*!< in/out: tables to fill */ + Item* ) /*!< in: condition (not used) */ +{ + btr_pcur_t pcur; + const rec_t* rec; + mem_heap_t* heap; + mtr_t mtr; + + DBUG_ENTER("xtradb_i_s_sys_zip_dict_fill_table"); + RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); + + /* deny access to user without SUPER_ACL privilege */ + if (check_global_access(thd, SUPER_ACL)) { + DBUG_RETURN(0); + } + + heap = mem_heap_create(1000); + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + + rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT); + ulint zip_size = dict_table_zip_size(pcur.btr_cur.index->table); + + while (rec) { + const char* err_msg; + ulint id; + const char* name; + const char* data; + ulint data_len; + + /* Extract necessary information from a SYS_ZIP_DICT row */ + err_msg = dict_process_sys_zip_dict( + heap, zip_size, rec, &id, &name, &data, &data_len); + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + + if (!err_msg) { + xtradb_i_s_dict_fill_sys_zip_dict( + thd, id, name, data, data_len, + tables->table); + } else { + push_warning_printf(thd, + Sql_condition::WARN_LEVEL_WARN, + ER_CANT_FIND_SYSTEM_REC, "%s", err_msg); + } + + mem_heap_empty(heap); + + /* Get the next record */ + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + rec = dict_getnext_system(&pcur, &mtr); + } + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + mem_heap_free(heap); + + DBUG_RETURN(0); +} + +static int i_s_xtradb_zip_dict_init(void* p) +{ + DBUG_ENTER("i_s_xtradb_zip_dict_init"); + + ST_SCHEMA_TABLE* schema = static_cast(p); + + schema->fields_info = xtradb_sys_zip_dict_fields_info; + schema->fill_table = xtradb_i_s_sys_zip_dict_fill_table; + + DBUG_RETURN(0); +} + +UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict = +{ + STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN), + STRUCT_FLD(info, &i_s_info), + STRUCT_FLD(name, "XTRADB_ZIP_DICT"), + 
STRUCT_FLD(author, PLUGIN_AUTHOR), + STRUCT_FLD(descr, "InnoDB compression dictionaries information"), + STRUCT_FLD(license, PLUGIN_LICENSE_GPL), + STRUCT_FLD(init, i_s_xtradb_zip_dict_init), + STRUCT_FLD(deinit, i_s_common_deinit), + STRUCT_FLD(version, INNODB_VERSION_SHORT), + STRUCT_FLD(status_vars, NULL), + STRUCT_FLD(system_vars, NULL), + STRUCT_FLD(__reserved1, NULL), + STRUCT_FLD(flags, 0UL), +}; + +enum zip_dict_cols_field_type +{ + zip_dict_cols_field_table_id, + zip_dict_cols_field_column_pos, + zip_dict_cols_field_dict_id +}; + +static ST_FIELD_INFO xtradb_sys_zip_dict_cols_fields_info[] = +{ + { STRUCT_FLD(field_name, "table_id"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + { STRUCT_FLD(field_name, "column_pos"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + { STRUCT_FLD(field_name, "dict_id"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE) }, + + END_OF_ST_FIELD_INFO +}; + +/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS with information +collected by scanning SYS_ZIP_DICT_COLS table. 
+@return 0 on success */ +static +int +xtradb_i_s_dict_fill_sys_zip_dict_cols( + THD* thd, /*!< in: thread */ + ulint table_id, /*!< in: table ID */ + ulint column_pos, /*!< in: column position */ + ulint dict_id, /*!< in: dict ID */ + TABLE* table_to_fill) /*!< in/out: fill this table */ +{ + DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict_cols"); + + Field** fields = table_to_fill->field; + + OK(field_store_ulint(fields[zip_dict_cols_field_table_id], + table_id)); + OK(field_store_ulint(fields[zip_dict_cols_field_column_pos], + column_pos)); + OK(field_store_ulint(fields[zip_dict_cols_field_dict_id], + dict_id)); + + OK(schema_table_store_record(thd, table_to_fill)); + + DBUG_RETURN(0); +} + +/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table. +Loop through each record in SYS_ZIP_DICT_COLS, and extract the column +information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table. +@return 0 on success */ +static +int +xtradb_i_s_sys_zip_dict_cols_fill_table( + THD* thd, /*!< in: thread */ + TABLE_LIST* tables, /*!< in/out: tables to fill */ + Item* ) /*!< in: condition (not used) */ +{ + btr_pcur_t pcur; + const rec_t* rec; + mem_heap_t* heap; + mtr_t mtr; + + DBUG_ENTER("xtradb_i_s_sys_zip_dict_cols_fill_table"); + RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); + + /* deny access to user without SUPER_ACL privilege */ + if (check_global_access(thd, SUPER_ACL)) { + DBUG_RETURN(0); + } + + heap = mem_heap_create(1000); + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + + rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT_COLS); + + while (rec) { + const char* err_msg; + ulint table_id; + ulint column_pos; + ulint dict_id; + + /* Extract necessary information from a SYS_ZIP_DICT_COLS + row */ + err_msg = dict_process_sys_zip_dict_cols( + heap, rec, &table_id, &column_pos, &dict_id); + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + + if (!err_msg) { + xtradb_i_s_dict_fill_sys_zip_dict_cols( + thd, table_id, column_pos, 
dict_id, + tables->table); + } else { + push_warning_printf(thd, + Sql_condition::WARN_LEVEL_WARN, + ER_CANT_FIND_SYSTEM_REC, "%s", err_msg); + } + + mem_heap_empty(heap); + + /* Get the next record */ + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + rec = dict_getnext_system(&pcur, &mtr); + } + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + mem_heap_free(heap); + + DBUG_RETURN(0); +} + +static int i_s_xtradb_zip_dict_cols_init(void* p) +{ + DBUG_ENTER("i_s_xtradb_zip_dict_cols_init"); + + ST_SCHEMA_TABLE* schema = static_cast(p); + + schema->fields_info = xtradb_sys_zip_dict_cols_fields_info; + schema->fill_table = xtradb_i_s_sys_zip_dict_cols_fill_table; + + DBUG_RETURN(0); +} + +UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict_cols = +{ + STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN), + STRUCT_FLD(info, &i_s_info), + STRUCT_FLD(name, "XTRADB_ZIP_DICT_COLS"), + STRUCT_FLD(author, PLUGIN_AUTHOR), + STRUCT_FLD(descr, "InnoDB compressed columns information"), + STRUCT_FLD(license, PLUGIN_LICENSE_GPL), + STRUCT_FLD(init, i_s_xtradb_zip_dict_cols_init), + STRUCT_FLD(deinit, i_s_common_deinit), + STRUCT_FLD(version, INNODB_VERSION_SHORT), + STRUCT_FLD(status_vars, NULL), + STRUCT_FLD(system_vars, NULL), + STRUCT_FLD(__reserved1, NULL), + STRUCT_FLD(flags, 0UL), +}; diff --git a/storage/xtradb/handler/xtradb_i_s.h b/storage/xtradb/handler/xtradb_i_s.h index 2f7552c565adc..905d84587affd 100644 --- a/storage/xtradb/handler/xtradb_i_s.h +++ b/storage/xtradb/handler/xtradb_i_s.h @@ -22,5 +22,7 @@ this program; if not, write to the Free Software Foundation, Inc., extern struct st_mysql_plugin i_s_xtradb_read_view; extern struct st_mysql_plugin i_s_xtradb_internal_hash_tables; extern struct st_mysql_plugin i_s_xtradb_rseg; +extern struct st_mysql_plugin i_s_xtradb_zip_dict; +extern struct st_mysql_plugin i_s_xtradb_zip_dict_cols; #endif /* XTRADB_I_S_H */ diff --git a/storage/xtradb/include/data0type.h b/storage/xtradb/include/data0type.h index 
111664b0b527f..f269c266efb9b 100644 --- a/storage/xtradb/include/data0type.h +++ b/storage/xtradb/include/data0type.h @@ -170,6 +170,9 @@ be less than 256 */ type when the column is true VARCHAR where MySQL uses 2 bytes to store the data len; for shorter VARCHARs MySQL uses only 1 byte */ +#define DATA_COMPRESSED 16384 /* this is ORed to the precise data + type when the column has COLUMN_FORMAT = + COMPRESSED attribute*/ /*-------------------------------------------*/ /* This many bytes we need to store the type information affecting the @@ -500,6 +503,17 @@ dtype_print( /*========*/ const dtype_t* type); /*!< in: type */ +/** +Calculates the number of extra bytes needed for compression header +depending on precise column type. +@reval 0 if prtype does not include DATA_COMPRESSED flag +@reval ZIP_COLUMN_HEADER_LENGTH if prtype includes DATA_COMPRESSED flag +*/ +UNIV_INLINE +ulint +prtype_get_compression_extra( + ulint prtype); /*!< in: precise type */ + /* Structure for an SQL data type. If you add fields to this structure, be sure to initialize them everywhere. This structure is initialized in the following functions: diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic index d489bef89a8bb..29dc480a19c3e 100644 --- a/storage/xtradb/include/data0type.ic +++ b/storage/xtradb/include/data0type.ic @@ -26,6 +26,7 @@ Created 1/16/1996 Heikki Tuuri #include /* strlen() */ #include "mach0data.h" +#include "rem0types.h" /* ZIP_COLUMN_HEADER_LENGTH */ #ifndef UNIV_HOTBACKUP # include "ha_prototypes.h" @@ -709,3 +710,18 @@ dtype_get_sql_null_size( 0, 0)); #endif /* !UNIV_HOTBACKUP */ } + +/** +Calculates the number of extra bytes needed for compression header +depending on precise column type. 
+@reval 0 if prtype does not include DATA_COMPRESSED flag +@reval ZIP_COLUMN_HEADER_LENGTH if prtype includes DATA_COMPRESSED flag +*/ +UNIV_INLINE +ulint +prtype_get_compression_extra( + ulint prtype) /*!< in: precise type */ +{ + return (prtype & DATA_COMPRESSED) != 0 ? + ZIP_COLUMN_HEADER_LENGTH : 0; +} diff --git a/storage/xtradb/include/dict0boot.h b/storage/xtradb/include/dict0boot.h index 477e1150f437b..d5bee886cbf43 100644 --- a/storage/xtradb/include/dict0boot.h +++ b/storage/xtradb/include/dict0boot.h @@ -324,6 +324,38 @@ enum dict_fld_sys_datafiles_enum { DICT_FLD__SYS_DATAFILES__PATH = 3, DICT_NUM_FIELDS__SYS_DATAFILES = 4 }; +/* The columns in SYS_DICT */ +enum dict_col_sys_zip_dict_enum { + DICT_COL__SYS_ZIP_DICT__ID = 0, + DICT_COL__SYS_ZIP_DICT__NAME = 1, + DICT_COL__SYS_ZIP_DICT__DATA = 2, + DICT_NUM_COLS__SYS_ZIP_DICT = 3 +}; +/* The field numbers in the SYS_DICT clustered index */ +enum dict_fld_sys_zip_dict_enum { + DICT_FLD__SYS_ZIP_DICT__ID = 0, + DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID = 1, + DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR = 2, + DICT_FLD__SYS_ZIP_DICT__NAME = 3, + DICT_FLD__SYS_ZIP_DICT__DATA = 4, + DICT_NUM_FIELDS__SYS_ZIP_DICT = 5 +}; +/* The columns in SYS_DICT_COLS */ +enum dict_col_sys_zip_dict_cols_enum { + DICT_COL__SYS_ZIP_DICT_COLS__TABLE_ID = 0, + DICT_COL__SYS_ZIP_DICT_COLS__COLUMN_POS = 1, + DICT_COL__SYS_ZIP_DICT_COLS__DICT_ID = 2, + DICT_NUM_COLS__SYS_ZIP_DICT_COLS = 3 +}; +/* The field numbers in the SYS_DICT_COLS clustered index */ +enum dict_fld_sys_zip_dict_cols_enum { + DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID = 0, + DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS = 1, + DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID = 2, + DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR = 3, + DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID = 4, + DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS = 5 +}; /* A number of the columns above occur in multiple tables. These are the length of thos fields. 
*/ diff --git a/storage/xtradb/include/dict0crea.h b/storage/xtradb/include/dict0crea.h index 6146917469a84..33877b678345f 100644 --- a/storage/xtradb/include/dict0crea.h +++ b/storage/xtradb/include/dict0crea.h @@ -152,6 +152,19 @@ UNIV_INTERN dberr_t dict_create_or_check_sys_tablespace(void); /*=====================================*/ + +#define ZIP_DICT_MAX_NAME_LENGTH 64 +/* Max window size (2^15) minus 262 */ +#define ZIP_DICT_MAX_DATA_LENGTH 32506 + +/** Creates the zip_dict system table inside InnoDB +at server bootstrap or server start if it is not found or is +not of the right form. +@return DB_SUCCESS or error code */ +UNIV_INTERN +dberr_t +dict_create_or_check_sys_zip_dict(void); + /********************************************************************//** Add a single tablespace definition to the data dictionary tables in the database. @@ -167,6 +180,84 @@ dict_create_add_tablespace_to_dictionary( trx_t* trx, /*!< in: transaction */ bool commit); /*!< in: if true then commit the transaction */ + +/** Add a single compression dictionary definition to the SYS_ZIP_DICT +InnoDB system table. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_add_zip_dict( + const char* name, /*!< in: dict name */ + ulint name_len, /*!< in: dict name length */ + const char* data, /*!< in: dict data */ + ulint data_len, /*!< in: dict data length */ + trx_t* trx); /*!< in/out: transaction */ + +/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS +InnoDB system table. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_add_zip_dict_reference( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint dict_id, /*!< in: dict id */ + trx_t* trx); /*!< in/out: transaction */ + +/** Get a single compression dictionary id for the given +(table id, column pos) pair. 
+@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_id_by_reference( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint* dict_id, /*!< out: dict id */ + trx_t* trx); /*!< in/out: transaction */ + +/** Get compression dictionary id for the given name. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_id_by_name( + const char* dict_name, /*!< in: dict name */ + ulint dict_name_len, /*!< in: dict name length */ + ulint* dict_id, /*!< out: dict id */ + trx_t* trx); /*!< in/out: transaction */ + +/** Get compression dictionary info (name and data) for the given id. +Allocates memory for name and data on success. +Must be freed with mem_free(). +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_get_zip_dict_info_by_id( + ulint dict_id, /*!< in: dict id */ + char** name, /*!< out: dict name */ + ulint* name_len, /*!< out: dict name length */ + char** data, /*!< out: dict data */ + ulint* data_len, /*!< out: dict data length */ + trx_t* trx); /*!< in/out: transaction */ + +/** Remove a single compression dictionary from the data dictionary +tables in the database. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_remove_zip_dict( + const char* name, /*!< in: dict name */ + ulint name_len, /*!< in: dict name length */ + trx_t* trx); /*!< in/out: transaction */ + +/** Remove all compression dictionary references for the given table ID from +the data dictionary tables in the database. +@return error code or DB_SUCCESS */ +UNIV_INTERN +dberr_t +dict_create_remove_zip_dict_references_for_table( + ulint table_id, /*!< in: table id */ + trx_t* trx); /*!< in/out: transaction */ + /********************************************************************//** Add a foreign key definition to the data dictionary tables. 
@return error code or DB_SUCCESS */ diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index f539f62960bfe..870b142ba3261 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1845,6 +1845,52 @@ dict_table_set_corrupt_by_space( ulint space_id, ibool need_mutex); +/** Insert a records into SYS_ZIP_DICT. +@retval DB_SUCCESS if OK +@retval dberr_t if the insert failed */ +UNIV_INTERN +dberr_t +dict_create_zip_dict( + const char* name, /*!< in: zip_dict name */ + ulint name_len, /*!< in: zip_dict name length*/ + const char* data, /*!< in: zip_dict data */ + ulint data_len); /*!< in: zip_dict data length */ + +/** Get single compression dictionary id for the given +(table id, column pos) pair. +@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found */ +UNIV_INTERN +dberr_t +dict_get_dictionary_id_by_key( + ulint table_id, /*!< in: table id */ + ulint column_pos, /*!< in: column position */ + ulint* dict_id); /*!< out: zip_dict id */ + +/** Get compression dictionary info (name and data) for the given id. +Allocates memory in name->str and data->str on success. +Must be freed with mem_free(). +@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found */ +UNIV_INTERN +dberr_t +dict_get_dictionary_info_by_id( + ulint dict_id, /*!< in: table name */ + char** name, /*!< out: dictionary name */ + ulint* name_len, /*!< out: dictionary name length*/ + char** data, /*!< out: dictionary data */ + ulint* data_len); /*!< out: dictionary data length*/ + +/** Delete a record in SYS_ZIP_DICT with the given name. 
+@retval DB_SUCCESS if OK +@retval DB_RECORD_NOT_FOUND if not found +@retval DB_ROW_IS_REFERENCED if in use */ +UNIV_INTERN +dberr_t +dict_drop_zip_dict( + const char* name, /*!< in: zip_dict name */ + ulint name_len); /*!< in: zip_dict name length*/ + #ifndef UNIV_NONINL #include "dict0dict.ic" #endif diff --git a/storage/xtradb/include/dict0load.h b/storage/xtradb/include/dict0load.h index dcbc3de8e942f..85e3e5656371b 100644 --- a/storage/xtradb/include/dict0load.h +++ b/storage/xtradb/include/dict0load.h @@ -44,6 +44,8 @@ enum dict_system_id_t { SYS_FOREIGN_COLS, SYS_TABLESPACES, SYS_DATAFILES, + SYS_ZIP_DICT, + SYS_ZIP_DICT_COLS, /* This must be last item. Defines the number of system tables. */ SYS_NUM_SYSTEM_TABLES @@ -386,6 +388,33 @@ dict_process_sys_datafiles( const rec_t* rec, /*!< in: current SYS_DATAFILES rec */ ulint* space, /*!< out: pace id */ const char** path); /*!< out: datafile path */ + +/** This function parses a SYS_ZIP_DICT record, extracts necessary +information from the record and returns to caller. +@return error message, or NULL on success */ +UNIV_INTERN +const char* +dict_process_sys_zip_dict( + mem_heap_t* heap, /*!< in/out: heap memory */ + ulint zip_size, /*!< in: nonzero=compressed BLOB page size */ + const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */ + ulint* id, /*!< out: dict id */ + const char** name, /*!< out: dict name */ + const char** data, /*!< out: dict data */ + ulint* data_len); /*!< out: dict data length */ + +/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary +information from the record and returns to caller. 
+@return error message, or NULL on success */ +UNIV_INTERN +const char* +dict_process_sys_zip_dict_cols( + mem_heap_t* heap, /*!< in/out: heap memory */ + const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */ + ulint* table_id, /*!< out: table id */ + ulint* column_pos, /*!< out: column position */ + ulint* dict_id); /*!< out: dict id */ + /********************************************************************//** Get the filepath for a spaceid from SYS_DATAFILES. This function provides a temporary heap which is used for the table lookup, but not for the path. diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h index 87b5787d416be..3e2f359bbebef 100644 --- a/storage/xtradb/include/fts0fts.h +++ b/storage/xtradb/include/fts0fts.h @@ -375,6 +375,7 @@ extern bool fts_need_sync; /** Variable specifying the table that has Fulltext index to display its content through information schema table */ extern char* fts_internal_tbl_name; +extern char* fts_internal_tbl_name2; #define fts_que_graph_free(graph) \ do { \ @@ -823,6 +824,15 @@ void fts_drop_orphaned_tables(void); /*==========================*/ +/* Get parent table name if it's a fts aux table +@param[in] aux_table_name aux table name +@param[in] aux_table_len aux table length +@return parent table name, or NULL */ +char* +fts_get_parent_table_name( + const char* aux_table_name, + ulint aux_table_len); + /******************************************************************//** Since we do a horizontal split on the index table, we need to drop all the split tables. diff --git a/storage/xtradb/include/os0thread.h b/storage/xtradb/include/os0thread.h index e36f836e0be34..44ff5e6757e4a 100644 --- a/storage/xtradb/include/os0thread.h +++ b/storage/xtradb/include/os0thread.h @@ -131,14 +131,27 @@ os_thread_create_func( os_thread_id_t* thread_id); /*!< out: id of the created thread, or NULL */ +/** +Waits until the specified thread completes and joins it. Its return value is +ignored. 
+ +@param thread thread to join */ +UNIV_INTERN +void +os_thread_join( + os_thread_t thread); + /*****************************************************************//** Exits the current thread. */ UNIV_INTERN void os_thread_exit( /*===========*/ - void* exit_value) /*!< in: exit value; in Windows this void* + void* exit_value, /*!< in: exit value; in Windows this void* is cast as a DWORD */ + bool detach = true) /*!< in: if true, the thread will be detached + right before exiting. If false, another thread + is responsible for joining this thread. */ UNIV_COLD MY_ATTRIBUTE((noreturn)); /*****************************************************************//** Returns the thread identifier of current thread. diff --git a/storage/xtradb/include/rem0types.h b/storage/xtradb/include/rem0types.h index f8133f77466d9..5da96066f8851 100644 --- a/storage/xtradb/include/rem0types.h +++ b/storage/xtradb/include/rem0types.h @@ -71,4 +71,7 @@ enum rec_format_enum { }; typedef enum rec_format_enum rec_format_t; +/** Compressed field header size in bytes */ +#define ZIP_COLUMN_HEADER_LENGTH 2 + #endif diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h index fc1846b76f312..27d3adfc7f07c 100644 --- a/storage/xtradb/include/row0mysql.h +++ b/storage/xtradb/include/row0mysql.h @@ -41,6 +41,9 @@ struct SysIndexCallback; extern ibool row_rollback_on_timeout; +extern uint srv_compressed_columns_zip_level; +extern ulong srv_compressed_columns_threshold; + struct row_prebuilt_t; /*******************************************************************//** @@ -51,6 +54,49 @@ row_mysql_prebuilt_free_blob_heap( /*==============================*/ row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a ha_innobase:: table handle */ + +/** Frees the compress heap in prebuilt when no longer needed. 
*/ +UNIV_INTERN +void +row_mysql_prebuilt_free_compress_heap( + row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a + ha_innobase:: table handle */ + +/** Uncompress blob/text/varchar column using zlib +@return pointer to the uncompressed data */ +const byte* +row_decompress_column( + const byte* data, /*!< in: data in innodb(compressed) format */ + ulint *len, /*!< in: data length; out: length of + decompressed data*/ + const byte* dict_data, + /*!< in: optional dictionary data used for + decompression */ + ulint dict_data_len, + /*!< in: optional dictionary data length */ + row_prebuilt_t* prebuilt); + /*!< in: use prebuilt->compress_heap only + here*/ + +/** Compress blob/text/varchar column using zlib +@return pointer to the compressed data */ +byte* +row_compress_column( + const byte* data, /*!< in: data in mysql(uncompressed) + format */ + ulint *len, /*!< in: data length; out: length of + compressed data*/ + ulint lenlen, /*!< in: bytes used to store the length of + data */ + const byte* dict_data, + /*!< in: optional dictionary data used for + compression */ + ulint dict_data_len, + /*!< in: optional dictionary data length */ + row_prebuilt_t* prebuilt); + /*!< in: use prebuilt->compress_heap only + here*/ + /*******************************************************************//** Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row format. @@ -89,10 +135,21 @@ row_mysql_store_blob_ref( to 4 bytes */ const void* data, /*!< in: BLOB data; if the value to store is SQL NULL this should be NULL pointer */ - ulint len); /*!< in: BLOB length; if the value to store + ulint len, /*!< in: BLOB length; if the value to store is SQL NULL this should be 0; remember also to set the NULL bit in the MySQL record header! 
*/ + bool need_decompression, + /*!< in: if the data need to be compressed*/ + const byte* dict_data, + /*!< in: optional compression dictionary + data */ + ulint dict_data_len, + /*!< in: optional compression dictionary data + length */ + row_prebuilt_t* prebuilt); + /*compress_heap only + here */ /*******************************************************************//** Reads a reference to a BLOB in the MySQL format. @return pointer to BLOB data */ @@ -103,8 +160,17 @@ row_mysql_read_blob_ref( ulint* len, /*!< out: BLOB length */ const byte* ref, /*!< in: BLOB reference in the MySQL format */ - ulint col_len); /*!< in: BLOB reference length + ulint col_len, /*!< in: BLOB reference length (not BLOB length) */ + bool need_compression, + /*!< in: if the data need to be + compressed*/ + const byte* dict_data, /*!< in: optional compression + dictionary data */ + ulint dict_data_len, /*!< in: optional compression + dictionary data length */ + row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap + only here */ /**************************************************************//** Pad a column with spaces. */ UNIV_INTERN @@ -152,7 +218,16 @@ row_mysql_store_col_in_innobase_format( necessarily the length of the actual payload data; if the column is a true VARCHAR then this is irrelevant */ - ulint comp); /*!< in: nonzero=compact format */ + ulint comp, /*!< in: nonzero=compact format */ + bool need_compression, + /*!< in: if the data need to be + compressed */ + const byte* dict_data, /*!< in: optional compression + dictionary data */ + ulint dict_data_len, /*!< in: optional compression + dictionary data length */ + row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap + only here */ /****************************************************************//** Handles user errors and lock waits detected by the database engine. 
@return true if it was a lock wait and we should continue running the @@ -643,6 +718,8 @@ struct mysql_row_templ_t { ulint is_unsigned; /*!< if a column type is an integer type and this field is != 0, then it is an unsigned integer type */ + bool compressed; /*!< if column format is compressed */ + LEX_CSTRING zip_dict_data; /*!< associated compression dictionary */ }; #define MYSQL_FETCH_CACHE_SIZE 8 @@ -839,6 +916,8 @@ struct row_prebuilt_t { in fetch_cache */ mem_heap_t* blob_heap; /*!< in SELECTS BLOB fields are copied to this heap */ + mem_heap_t* compress_heap; /*!< memory heap used to compress + /decompress blob column*/ mem_heap_t* old_vers_heap; /*!< memory heap where a previous version is built in consistent read */ bool in_fts_query; /*!< Whether we are in a FTS query */ diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index 692d339608a2e..09f305091c2b0 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -487,6 +487,9 @@ extern ibool srv_priority_boost; extern ulint srv_truncated_status_writes; extern ulint srv_available_undo_logs; +extern ulint srv_column_compressed; +extern ulint srv_column_decompressed; + extern ulint srv_mem_pool_size; extern ulint srv_lock_table_size; @@ -1079,6 +1082,8 @@ struct export_var_t{ ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id - purged view's min trx_id */ #endif /* UNIV_DEBUG */ + ulint innodb_column_compressed; /*!< srv_column_compressed */ + ulint innodb_column_decompressed; /*!< srv_column_decompressed */ }; /** Thread slot in the thread table. 
*/ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 4d64e3249c0b4..296c04d9f621b 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 78.1 +#define PERCONA_INNODB_VERSION 79.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 0768bb6bb003a..7784e8538b7c5 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -975,6 +975,7 @@ log_init(void) log_sys->next_checkpoint_no = 0; log_sys->last_checkpoint_lsn = log_sys->lsn; + log_sys->next_checkpoint_lsn = log_sys->lsn; log_sys->n_pending_checkpoint_writes = 0; @@ -1891,6 +1892,7 @@ log_complete_checkpoint(void) log_sys->next_checkpoint_no++; + ut_ad(log_sys->next_checkpoint_lsn >= log_sys->last_checkpoint_lsn); log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn; MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, log_sys->lsn - log_sys->last_checkpoint_lsn); @@ -1978,11 +1980,17 @@ log_group_checkpoint( ulint i; ut_ad(!srv_read_only_mode); + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_LAST_PHASE); ut_ad(mutex_own(&(log_sys->mutex))); ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE); buf = group->checkpoint_buf; +#ifdef UNIV_DEBUG + lsn_t old_next_checkpoint_lsn + = mach_read_from_8(buf + LOG_CHECKPOINT_LSN); + ut_ad(old_next_checkpoint_lsn <= log_sys->next_checkpoint_lsn); +#endif /* UNIV_DEBUG */ mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no); mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn); @@ -2242,6 +2250,7 @@ log_checkpoint( return(FALSE); } + ut_ad(oldest_lsn >= log_sys->next_checkpoint_lsn); log_sys->next_checkpoint_lsn = oldest_lsn; #ifdef UNIV_DEBUG @@ -3490,13 +3499,15 @@ logs_empty_and_mark_files_at_shutdown(void) before proceeding further. 
*/ srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; count = 0; - while (buf_page_cleaner_is_active) { - ++count; - os_thread_sleep(100000); - if (srv_print_verbose_log && count > 600) { + while (buf_page_cleaner_is_active || buf_lru_manager_is_active) { + if (srv_print_verbose_log && count == 0) { ib_logf(IB_LOG_LEVEL_INFO, "Waiting for page_cleaner to " "finish flushing of buffer pool"); + } + ++count; + os_thread_sleep(100000); + if (count > 600) { count = 0; } } @@ -3664,6 +3675,7 @@ logs_empty_and_mark_files_at_shutdown(void) ut_a(freed); ut_a(lsn == log_sys->lsn); + ut_ad(lsn == log_sys->last_checkpoint_lsn); if (lsn < srv_start_lsn) { ib_logf(IB_LOG_LEVEL_ERROR, diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index d80cb2ad4471d..46f544178d2ff 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -441,6 +441,7 @@ log_online_track_missing_on_startup( current server startup */ { ut_ad(last_tracked_lsn != tracking_start_lsn); + ut_ad(srv_track_changed_pages); ib_logf(IB_LOG_LEVEL_WARN, "last tracked LSN in \'%s\' is " LSN_PF ", but the last checkpoint LSN is " LSN_PF ". 
This might be " @@ -623,6 +624,8 @@ log_online_read_init(void) compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0); compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0); + ut_ad(srv_track_changed_pages); + log_bmp_sys = static_cast (ut_malloc(sizeof(*log_bmp_sys))); log_bmp_sys->read_buf_ptr = static_cast @@ -1097,10 +1100,15 @@ log_online_write_bitmap_page( { ibool success; + ut_ad(srv_track_changed_pages); ut_ad(mutex_own(&log_bmp_sys->mutex)); /* Simulate a write error */ - DBUG_EXECUTE_IF("bitmap_page_write_error", return FALSE;); + DBUG_EXECUTE_IF("bitmap_page_write_error", + ib_logf(IB_LOG_LEVEL_ERROR, + "simulating bitmap write error in " + "log_online_write_bitmap_page"); + return FALSE;); success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file, block, log_bmp_sys->out.offset, @@ -1190,7 +1198,9 @@ log_online_write_bitmap(void) rbt_next(log_bmp_sys->modified_pages, bmp_tree_node); DBUG_EXECUTE_IF("bitmap_page_2_write_error", - DBUG_SET("+d,bitmap_page_write_error");); + ut_ad(bmp_tree_node); /* 2nd page must exist */ + DBUG_SET("+d,bitmap_page_write_error"); + DBUG_SET("-d,bitmap_page_2_write_error");); } rbt_reset(log_bmp_sys->modified_pages); @@ -1211,15 +1221,11 @@ log_online_follow_redo_log(void) log_group_t* group; ibool result; - mutex_enter(&log_bmp_sys->mutex); - - if (!srv_track_changed_pages) { - mutex_exit(&log_bmp_sys->mutex); - return FALSE; - } - + ut_ad(srv_track_changed_pages); ut_ad(!srv_read_only_mode); + mutex_enter(&log_bmp_sys->mutex); + /* Grab the LSN of the last checkpoint, we will parse up to it */ mutex_enter(&(log_sys->mutex)); log_bmp_sys->end_lsn = log_sys->last_checkpoint_lsn; @@ -1562,9 +1568,12 @@ log_online_diagnose_bitmap_eof( /* It's a "Warning" here because it's not a fatal error for the whole server */ ib_logf(IB_LOG_LEVEL_WARN, - "changed page bitmap file \'%s\' does not " - "contain a complete run at the end.", - bitmap_file->name); + "changed page bitmap file \'%s\', size " + UINT64PF " 
bytes, does not " + "contain a complete run at the next read " + "offset " UINT64PF, + bitmap_file->name, bitmap_file->size, + bitmap_file->offset); return FALSE; } } diff --git a/storage/xtradb/mach/mach0data.cc b/storage/xtradb/mach/mach0data.cc index df68aab8a1874..206434dc5ab37 100644 --- a/storage/xtradb/mach/mach0data.cc +++ b/storage/xtradb/mach/mach0data.cc @@ -56,7 +56,18 @@ mach_parse_compressed( *val = flag; return(ptr + 1); - } else if (flag < 0xC0UL) { + } + + /* Workaround GCC bug + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77673: + the compiler moves mach_read_from_4 right to the beginning of the + function, causing and out-of-bounds read if we are reading a short + integer close to the end of buffer. */ +#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__) + asm volatile("": : :"memory"); +#endif + + if (flag < 0xC0UL) { if (end_ptr < ptr + 2) { return(NULL); } diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc index 1d417f9823c47..93f45e060f8fd 100644 --- a/storage/xtradb/os/os0thread.cc +++ b/storage/xtradb/os/os0thread.cc @@ -210,14 +210,33 @@ os_thread_create_func( #endif } +/** +Waits until the specified thread completes and joins it. Its return value is +ignored. + +@param thread thread to join */ +UNIV_INTERN +void +os_thread_join( + os_thread_t thread) +{ + int ret MY_ATTRIBUTE((unused)) = pthread_join(thread, NULL); + + /* Waiting on already-quit threads is allowed */ + ut_ad(ret == 0 || ret == ESRCH); +} + /*****************************************************************//** Exits the current thread. */ UNIV_INTERN void os_thread_exit( /*===========*/ - void* exit_value) /*!< in: exit value; in Windows this void* + void* exit_value, /*!< in: exit value; in Windows this void* is cast as a DWORD */ + bool detach) /*!< in: if true, the thread will be detached + right before exiting. If false, another thread + is responsible for joining this thread. 
*/ { #ifdef UNIV_DEBUG_THREAD_CREATION fprintf(stderr, "Thread exits, id %lu\n", @@ -233,7 +252,8 @@ os_thread_exit( #ifdef __WIN__ ExitThread((DWORD) exit_value); #else - pthread_detach(pthread_self()); + if (detach) + pthread_detach(pthread_self()); pthread_exit(exit_value); #endif } diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc index a95e9c236139e..09cd810cd7b08 100644 --- a/storage/xtradb/rem/rem0rec.cc +++ b/storage/xtradb/rem/rem0rec.cc @@ -320,7 +320,8 @@ rec_init_offsets_comp_ordinary( stored in one byte for 0..127. The length will be encoded in two bytes when it is 128 or more, or when the field is stored externally. */ - if (UNIV_UNLIKELY(col->len > 255) + if (UNIV_UNLIKELY(col->len > 255 - + prtype_get_compression_extra(col->prtype)) || UNIV_UNLIKELY(col->mtype == DATA_BLOB)) { if (len & 0x80) { @@ -841,8 +842,12 @@ rec_get_converted_size_comp_prefix_low( continue; } - ut_ad(len <= col->len || col->mtype == DATA_BLOB - || (col->len == 0 && col->mtype == DATA_VARCHAR)); + ut_ad(len <= col->len || col->mtype == DATA_BLOB || + ((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY + || col->mtype == DATA_VARMYSQL) + && (col->len == 0 + || len <= col->len + + prtype_get_compression_extra(col->prtype)))); fixed_len = field->fixed_len; if (temp && fixed_len @@ -874,7 +879,9 @@ rec_get_converted_size_comp_prefix_low( ut_ad(col->len >= 256 || col->mtype == DATA_BLOB); extra_size += 2; } else if (len < 128 - || (col->len < 256 && col->mtype != DATA_BLOB)) { + || (col->len < 256 - + prtype_get_compression_extra(col->prtype) + && col->mtype != DATA_BLOB)) { extra_size++; } else { /* For variable-length columns, we look up the @@ -1269,12 +1276,16 @@ rec_convert_dtuple_to_rec_comp( *lens-- = (byte) (len >> 8) | 0xc0; *lens-- = (byte) len; } else { - ut_ad(len <= dtype_get_len(type) + ut_ad(len <= dtype_get_len(type) + + prtype_get_compression_extra( + dtype_get_prtype(type)) || dtype_get_mtype(type) == DATA_BLOB || 
!strcmp(index->name, FTS_INDEX_TABLE_IND_NAME)); if (len < 128 - || (dtype_get_len(type) < 256 + || (dtype_get_len(type) < 256 - + prtype_get_compression_extra( + dtype_get_prtype(type)) && dtype_get_mtype(type) != DATA_BLOB)) { *lens-- = (byte) len; diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc index 6fac6c0d317e5..97f2b8d4b5d54 100644 --- a/storage/xtradb/row/row0ftsort.cc +++ b/storage/xtradb/row/row0ftsort.cc @@ -960,7 +960,7 @@ fts_parallel_merge( CloseHandle(psort_info->thread_hdl); #endif /*__WIN__ */ - os_thread_exit(NULL); + os_thread_exit(NULL, false); OS_THREAD_DUMMY_RETURN; } diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index a6751b208f74c..54183759e8d31 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -613,7 +613,7 @@ row_log_table_delete( &old_pk_extra_size); ut_ad(old_pk_extra_size < 0x100); - mrec_size = 4 + old_pk_size; + mrec_size = 6 + old_pk_size; /* Log enough prefix of the BLOB unless both the old and new table are in COMPACT or REDUNDANT format, @@ -643,8 +643,8 @@ row_log_table_delete( *b++ = static_cast(old_pk_extra_size); /* Log the size of external prefix we saved */ - mach_write_to_2(b, ext_size); - b += 2; + mach_write_to_4(b, ext_size); + b += 4; rec_convert_dtuple_to_temp( b + old_pk_extra_size, new_index, @@ -2268,14 +2268,14 @@ row_log_table_apply_op( break; case ROW_T_DELETE: - /* 1 (extra_size) + 2 (ext_size) + at least 1 (payload) */ - if (mrec + 4 >= mrec_end) { + /* 1 (extra_size) + 4 (ext_size) + at least 1 (payload) */ + if (mrec + 6 >= mrec_end) { return(NULL); } extra_size = *mrec++; - ext_size = mach_read_from_2(mrec); - mrec += 2; + ext_size = mach_read_from_4(mrec); + mrec += 4; ut_ad(mrec < mrec_end); /* We assume extra_size < 0x100 for the PRIMARY KEY prefix. 
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index feb18c82ab62a..3f50504bec8d6 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -523,7 +523,12 @@ row_merge_buf_add( dfield_set_len(field, len); } - ut_ad(len <= col->len || col->mtype == DATA_BLOB); + ut_ad(len <= col->len || col->mtype == DATA_BLOB || + ((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY + || col->mtype == DATA_VARMYSQL) + && (col->len == 0 + || len <= col->len + + prtype_get_compression_extra(col->prtype)))); fixed_len = ifield->fixed_len; if (fixed_len && !dict_table_is_comp(index->table) @@ -552,7 +557,9 @@ row_merge_buf_add( } else if (dfield_is_ext(field)) { extra_size += 2; } else if (len < 128 - || (col->len < 256 && col->mtype != DATA_BLOB)) { + || (col->len < 256 - + prtype_get_compression_extra(col->prtype) + && col->mtype != DATA_BLOB)) { extra_size++; } else { /* For variable-length columns, we look up the @@ -3780,6 +3787,13 @@ row_merge_build_indexes( " exited when creating FTS" " index '%s'", indexes[i]->name); + } else { + for (j = 0; j < FTS_NUM_AUX_INDEX; + j++) { + + os_thread_join(merge_info[j] + .thread_hdl); + } } } else { /* This cannot report duplicates; an diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 466ff113127ad..d54ac222137b7 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -65,11 +65,54 @@ Created 9/17/2000 Heikki Tuuri #include "m_string.h" #include "my_sys.h" #include "ha_prototypes.h" +#include "zlib.h" #include /** Provide optional 4.x backwards compatibility for 5.0 and above */ UNIV_INTERN ibool row_rollback_on_timeout = FALSE; +/** +Z_NO_COMPRESSION = 0 +Z_BEST_SPEED = 1 +Z_BEST_COMPRESSION = 9 +Z_DEFAULT_COMPRESSION = -1 +Compression level to be used by zlib for compressed-blob columns. +Settable by user. 
+*/ +UNIV_INTERN uint srv_compressed_columns_zip_level = DEFAULT_COMPRESSION_LEVEL; +/** +(Z_FILTERED | Z_HUFFMAN_ONLY | Z_RLE | Z_FIXED | Z_DEFAULT_STRATEGY) + +The strategy parameter is used to tune the compression algorithm. Use the +value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a +filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only +(no string match), or Z_RLE to limit match distances to one +(run-length encoding). Filtered data consists mostly of small values with a +somewhat random distribution. In this case, the compression algorithm is +tuned to compress them better. +The effect of Z_FILTERED is to force more Huffman coding and less string +matching; it is somewhat intermediate between Z_DEFAULT_STRATEGY and +Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as Z_HUFFMAN_ONLY, +but give better compression for PNG image data. The strategy parameter only +affects the compression ratio but not the correctness of the compressed +output even if it is not set appropriately. Z_FIXED prevents the use of +dynamic Huffman codes, allowing for a simpler decoder for special +applications. +*/ +const uint srv_compressed_columns_zlib_strategy = Z_DEFAULT_STRATEGY; +/** Compress the column if the data length exceeds this value. */ +UNIV_INTERN ulong srv_compressed_columns_threshold = 96; +/** +Determine if zlib needs to compute adler32 value for the compressed data. +This variables is similar to page_zip_zlib_wrap, but only used by +compressed blob columns. +*/ +const bool srv_compressed_columns_zlib_wrap = true; +/** +Determine if zlib will use custom memory allocation functions based on +InnoDB memory heap routines (mem_heap_t*). +*/ +const bool srv_compressed_columns_zlib_use_heap = false; /** Chain node of the list of tables to drop in the background. 
*/ struct row_mysql_drop_t{ char* table_name; /*!< table name */ @@ -173,6 +216,17 @@ row_mysql_prebuilt_free_blob_heap( prebuilt->blob_heap = NULL; } +/** Frees the compress heap in prebuilt when no longer needed. */ +UNIV_INTERN +void +row_mysql_prebuilt_free_compress_heap( + row_prebuilt_t* prebuilt) /*!< in: prebuilt struct of a + ha_innobase:: table handle */ +{ + mem_heap_free(prebuilt->compress_heap); + prebuilt->compress_heap = NULL; +} + /*******************************************************************//** Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row format. @@ -229,6 +283,425 @@ row_mysql_read_true_varchar( return(field + 1); } +/** + Compressed BLOB header format: + --------------------------------------------------------------- + | reserved | wrap | algorithm | len-len | compressed | unused | + | [1] | [1] | [5] | [3] | [1] | [5] | + --------------------------------------------------------------- + | 0 0 | 1 1 | 2 6 | 7 9 | 10 10 | 11 15 | + --------------------------------------------------------------- + * 'reserved' bit is planned to be used in future versions of the BLOB + header. In this version it must always be + 'default_zip_column_reserved_value' (0). + * 'wrap' identifies if compression algorithm calculated a checksum + (adler32 in case of zlib) and appended it to the compressed data. + * 'algorithm' identifies which algoritm was used to compress this BLOB. + Currently, the only value 'default_zip_column_algorithm_value' (0) is + supported. + * 'len-len' field identifies the length of the column length data portion + followed by this header (see below). + * If 'compressed' bit is set to 1, then this header is immediately followed + by 1..8 bytes (depending on the value of 'len-len' bitfield) which + determine original (uncompressed) block size. These 'len-len' bytes are + followed by compressed representation of the original data. 
+ * If 'compressed' bit is set to 0, every other bitfield ('wrap', + 'algorithm' and 'le-len') must be ignored. In this case the header is + immediately followed by uncompressed (original) data. +*/ + +/** + Currently the only supported value for the 'reserved' field is + false (0). +*/ +static const bool default_zip_column_reserved_value = false; + +/** + Currently the only supported value for the 'algorithm' field is 0, which + means 'zlib'. +*/ +static const uint default_zip_column_algorithm_value = 0; + +static const size_t zip_column_prefix_max_length = + ZIP_COLUMN_HEADER_LENGTH + 8; +static const size_t zip_column_header_length = ZIP_COLUMN_HEADER_LENGTH; + +/* 'reserved', bit 0 */ +static const uint zip_column_reserved = 0; +/* 0000 0000 0000 0001 */ +static const uint zip_column_reserved_mask = 0x0001; + +/* 'wrap', bit 1 */ +static const uint zip_column_wrap = 1; +/* 0000 0000 0000 0010 */ +static const uint zip_column_wrap_mask = 0x0002; + +/* 'algorithm', bit 2,3,4,5,6 */ +static const uint zip_column_algorithm = 2; +/* 0000 0000 0111 1100 */ +static const uint zip_column_algorithm_mask = 0x007C; + +/* 'len-len', bit 7,8,9 */ +static const uint zip_column_data_length = 7; +/* 0000 0011 1000 0000 */ +static const uint zip_column_data_length_mask = 0x0380; + +/* 'compressed', bit 10 */ +static const uint zip_column_compressed = 10; +/* 0000 0100 0000 0000 */ +static const uint zip_column_compressed_mask = 0x0400; + +/** Updates compressed block header with the given components */ +static void +column_set_compress_header( + byte* data, + bool compressed, + ulint lenlen, + uint alg, + bool wrap, + bool reserved) +{ + ulint header = 0; + header |= (compressed << zip_column_compressed); + header |= (lenlen << zip_column_data_length); + header |= (alg << zip_column_algorithm); + header |= (wrap << zip_column_wrap); + header |= (reserved << zip_column_reserved); + mach_write_to_2(data, header); +} + +/** Parse compressed block header into components */ +static 
void +column_get_compress_header( + const byte* data, + bool* compressed, + ulint* lenlen, + uint* alg, + bool* wrap, + bool* reserved +) +{ + ulint header = mach_read_from_2(data); + *compressed = ((header & zip_column_compressed_mask) >> + zip_column_compressed); + *lenlen = ((header & zip_column_data_length_mask) >> + zip_column_data_length); + *alg = ((header & zip_column_algorithm_mask) >> + zip_column_algorithm); + *wrap = ((header & zip_column_wrap_mask) >> + zip_column_wrap); + *reserved = ((header & zip_column_reserved_mask) >> + zip_column_reserved); +} + +/** Allocate memory for zlib. */ +static +void* +column_zip_zalloc( + void* opaque, /*!< in/out: memory heap */ + uInt items, /*!< in: number of items to allocate */ + uInt size) /*!< in: size of an item in bytes */ +{ + return(mem_heap_zalloc(static_cast(opaque), + items * size)); +} + +/** Deallocate memory for zlib. */ +static +void +column_zip_free( + void* opaque MY_ATTRIBUTE((unused)), /*!< in: memory heap */ + void* address MY_ATTRIBUTE((unused))) /*!< in: object to free */ +{ +} + +/** Configure the zlib allocator to use the given memory heap. 
*/ +UNIV_INTERN +void +column_zip_set_alloc( + void* stream, /*!< in/out: zlib stream */ + mem_heap_t* heap) /*!< in: memory heap to use */ +{ + z_stream* strm = static_cast(stream); + + if (srv_compressed_columns_zlib_use_heap) { + strm->zalloc = column_zip_zalloc; + strm->zfree = column_zip_free; + strm->opaque = heap; + } else { + strm->zalloc = (alloc_func)0; + strm->zfree = (free_func)0; + strm->opaque = (voidpf)0; + } +} + +/** Compress blob/text/varchar column using zlib +@return pointer to the compressed data */ +byte* +row_compress_column( + const byte* data, /*!< in: data in mysql(uncompressed) + format */ + ulint *len, /*!< in: data length; out: length of + compressed data*/ + ulint lenlen, /*!< in: bytes used to store the length of + data */ + const byte* dict_data, + /*!< in: optional dictionary data used for + compression */ + ulint dict_data_len, + /*!< in: optional dictionary data length */ + row_prebuilt_t* prebuilt) + /*!< in: use prebuilt->compress_heap only + here*/ +{ + int err = 0; + ulint comp_len = *len; + ulint buf_len = *len + zip_column_prefix_max_length; + byte* buf; + byte* ptr; + z_stream c_stream; + bool wrap = srv_compressed_columns_zlib_wrap; + + int window_bits = wrap ? 
MAX_WBITS : -MAX_WBITS; + + if (!prebuilt->compress_heap) { + prebuilt->compress_heap = + mem_heap_create(max(UNIV_PAGE_SIZE, buf_len)); + } + + buf = static_cast(mem_heap_zalloc( + prebuilt->compress_heap,buf_len)); + + if (*len < srv_compressed_columns_threshold || + srv_compressed_columns_zip_level == Z_NO_COMPRESSION) + goto do_not_compress; + + ptr = buf + zip_column_header_length + lenlen; + + /*init deflate object*/ + c_stream.next_in = const_cast(data); + c_stream.avail_in = *len; + c_stream.next_out = ptr; + c_stream.avail_out = comp_len; + + column_zip_set_alloc(&c_stream, prebuilt->compress_heap); + + err = deflateInit2(&c_stream, srv_compressed_columns_zip_level, + Z_DEFLATED, window_bits, MAX_MEM_LEVEL, + srv_compressed_columns_zlib_strategy); + ut_a(err == Z_OK); + + if (dict_data != 0 && dict_data_len != 0) { + err = deflateSetDictionary(&c_stream, dict_data, + dict_data_len); + ut_a(err == Z_OK); + } + + err = deflate(&c_stream, Z_FINISH); + if (err != Z_STREAM_END) { + deflateEnd(&c_stream); + if (err == Z_OK) + err = Z_BUF_ERROR; + } else { + comp_len = c_stream.total_out; + err = deflateEnd(&c_stream); + } + + switch (err) { + case Z_OK: + break; + case Z_BUF_ERROR: + /* data after compress is larger than uncompressed data*/ + break; + default: + ib_logf(IB_LOG_LEVEL_ERROR, + "failed to compress the column, error: %d\n", err); + } + + /* make sure the compressed data size is smaller than + uncompressed data */ + if (err == Z_OK && + *len > (comp_len + zip_column_header_length + lenlen)) { + column_set_compress_header(buf, true, lenlen - 1, + default_zip_column_algorithm_value, wrap, + default_zip_column_reserved_value); + ptr = buf + zip_column_header_length; + /*store the uncompressed data length*/ + switch (lenlen) { + case 1: + mach_write_to_1(ptr, *len); + break; + case 2: + mach_write_to_2(ptr, *len); + break; + case 3: + mach_write_to_3(ptr, *len); + break; + case 4: + mach_write_to_4(ptr, *len); + break; + default: + ut_error; + } + + *len 
= comp_len + zip_column_header_length + lenlen; + return buf; + } + +do_not_compress: + ptr = buf; + column_set_compress_header(ptr, false, 0, + default_zip_column_algorithm_value, false, + default_zip_column_reserved_value); + ptr += zip_column_header_length; + memcpy(ptr, data, *len); + *len += zip_column_header_length; + return buf; +} + +/** Uncompress blob/text/varchar column using zlib +@return pointer to the uncompressed data */ +const byte* +row_decompress_column( + const byte* data, /*!< in: data in innodb(compressed) format */ + ulint *len, /*!< in: data length; out: length of + decompressed data*/ + const byte* dict_data, + /*!< in: optional dictionary data used for + decompression */ + ulint dict_data_len, + /*!< in: optional dictionary data length */ + row_prebuilt_t* prebuilt) + /*!< in: use prebuilt->compress_heap only + here*/ +{ + ulint buf_len = 0; + byte* buf; + int err = 0; + int window_bits = 0; + z_stream d_stream; + bool is_compressed = false; + bool wrap = false; + bool reserved = false; + ulint lenlen = 0; + uint alg = 0; + + ut_ad(*len != ULINT_UNDEFINED); + ut_ad(*len >= zip_column_header_length); + + column_get_compress_header(data, &is_compressed, &lenlen, &alg, + &wrap, &reserved); + + if (reserved != default_zip_column_reserved_value) { + ib_logf(IB_LOG_LEVEL_FATAL, + "unsupported compressed BLOB header format\n"); + } + + if (alg != default_zip_column_algorithm_value) { + ib_logf(IB_LOG_LEVEL_FATAL, + "unsupported 'algorithm' value in the" + " compressed BLOB header\n"); + } + + ut_a(lenlen < 4); + + data += zip_column_header_length; + if (!is_compressed) { /* column not compressed */ + *len -= zip_column_header_length; + return data; + } + + lenlen++; + + ulint comp_len = *len - zip_column_header_length - lenlen; + + ulint uncomp_len = 0; + switch (lenlen) { + case 1: + uncomp_len = mach_read_from_1(data); + break; + case 2: + uncomp_len = mach_read_from_2(data); + break; + case 3: + uncomp_len = mach_read_from_3(data); + break; + 
case 4: + uncomp_len = mach_read_from_4(data); + break; + default: + ut_error; + } + + data += lenlen; + + /* data is compressed, decompress it*/ + if (!prebuilt->compress_heap) { + prebuilt->compress_heap = + mem_heap_create(max(UNIV_PAGE_SIZE, uncomp_len)); + } + + buf_len = uncomp_len; + buf = static_cast(mem_heap_zalloc( + prebuilt->compress_heap, buf_len)); + + /* init d_stream */ + d_stream.next_in = const_cast(data); + d_stream.avail_in = comp_len; + d_stream.next_out = buf; + d_stream.avail_out = buf_len; + + column_zip_set_alloc(&d_stream, prebuilt->compress_heap); + + window_bits = wrap ? MAX_WBITS : -MAX_WBITS; + err = inflateInit2(&d_stream, window_bits); + ut_a(err == Z_OK); + + err = inflate(&d_stream, Z_FINISH); + if (err == Z_NEED_DICT) { + ut_a(dict_data != 0 && dict_data_len != 0); + err = inflateSetDictionary(&d_stream, dict_data, + dict_data_len); + ut_a(err == Z_OK); + err = inflate(&d_stream, Z_FINISH); + } + + if (err != Z_STREAM_END) { + inflateEnd(&d_stream); + if (err == Z_BUF_ERROR && d_stream.avail_in == 0) + err = Z_DATA_ERROR; + } else { + buf_len = d_stream.total_out; + err = inflateEnd(&d_stream); + } + + switch (err) { + case Z_OK: + break; + case Z_BUF_ERROR: + ib_logf(IB_LOG_LEVEL_FATAL, + "zlib buf error, this shouldn't happen\n"); + break; + default: + ib_logf(IB_LOG_LEVEL_FATAL, + "failed to decompress column, error: %d\n", err); + } + + if (err == Z_OK) { + if (buf_len != uncomp_len) { + ib_logf(IB_LOG_LEVEL_FATAL, + "failed to decompress blob column, may" + " be corrupted\n"); + } + *len = buf_len; + return buf; + } + + *len -= (zip_column_header_length + lenlen); + return data; +} + + /*******************************************************************//** Stores a reference to a BLOB in the MySQL format. 
*/ UNIV_INTERN @@ -242,10 +715,21 @@ row_mysql_store_blob_ref( to 4 bytes */ const void* data, /*!< in: BLOB data; if the value to store is SQL NULL this should be NULL pointer */ - ulint len) /*!< in: BLOB length; if the value to store + ulint len, /*!< in: BLOB length; if the value to store is SQL NULL this should be 0; remember also to set the NULL bit in the MySQL record header! */ + bool need_decompression, + /*!< in: if the data need to be compressed*/ + const byte* dict_data, + /*!< in: optional compression dictionary + data */ + ulint dict_data_len, + /*!< in: optional compression dictionary data + length */ + row_prebuilt_t* prebuilt) + /*compress_heap only + here */ { /* MySQL might assume the field is set to zero except the length and the pointer fields */ @@ -257,13 +741,28 @@ row_mysql_store_blob_ref( In 32-bit architectures we only use the first 4 bytes of the pointer slot. */ - ut_a(col_len - 8 > 1 || len < 256); - ut_a(col_len - 8 > 2 || len < 256 * 256); - ut_a(col_len - 8 > 3 || len < 256 * 256 * 256); + ut_a(col_len - 8 > 1 || + len < 256 + + (need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0)); + ut_a(col_len - 8 > 2 || + len < 256 * 256 + + (need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0)); + ut_a(col_len - 8 > 3 || + len < 256 * 256 * 256 + + (need_decompression ? 
ZIP_COLUMN_HEADER_LENGTH : 0)); - mach_write_to_n_little_endian(dest, col_len - 8, len); + const byte *ptr = NULL; - memcpy(dest + col_len - 8, &data, sizeof data); + if (need_decompression) + ptr = row_decompress_column((const byte*)data, &len, + dict_data, dict_data_len, prebuilt); + + if (ptr) + memcpy(dest + col_len - 8, &ptr, sizeof ptr); + else + memcpy(dest + col_len - 8, &data, sizeof data); + + mach_write_to_n_little_endian(dest, col_len - 8, len); } /*******************************************************************//** @@ -276,15 +775,32 @@ row_mysql_read_blob_ref( ulint* len, /*!< out: BLOB length */ const byte* ref, /*!< in: BLOB reference in the MySQL format */ - ulint col_len) /*!< in: BLOB reference length + ulint col_len, /*!< in: BLOB reference length (not BLOB length) */ + bool need_compression, + /*!< in: if the data need to be + compressed*/ + const byte* dict_data, /*!< in: optional compression + dictionary data */ + ulint dict_data_len, /*!< in: optional compression + dictionary data length */ + row_prebuilt_t* prebuilt) /*!< in: use prebuilt->compress_heap + only here */ { - byte* data; + byte* data = NULL; + byte* ptr = NULL; *len = mach_read_from_n_little_endian(ref, col_len - 8); memcpy(&data, ref + col_len - 8, sizeof data); + if (need_compression) { + ptr = row_compress_column(data, len, col_len - 8, dict_data, + dict_data_len, prebuilt); + if (ptr) + data = ptr; + } + return(data); } @@ -367,7 +883,16 @@ row_mysql_store_col_in_innobase_format( necessarily the length of the actual payload data; if the column is a true VARCHAR then this is irrelevant */ - ulint comp) /*!< in: nonzero=compact format */ + ulint comp, /*!< in: nonzero=compact format */ + bool need_compression, + /*!< in: if the data need to be + compressed*/ + const byte* dict_data, /*!< in: optional compression + dictionary data */ + ulint dict_data_len, /*!< in: optional compression + dictionary data length */ + row_prebuilt_t* prebuilt) /*!< in: use 
prebuilt->compress_heap + only here */ { const byte* ptr = mysql_data; const dtype_t* dtype; @@ -420,8 +945,14 @@ row_mysql_store_col_in_innobase_format( lenlen = 2; } - ptr = row_mysql_read_true_varchar(&col_len, mysql_data, - lenlen); + const byte* tmp_ptr = row_mysql_read_true_varchar( + &col_len, mysql_data, lenlen); + if (need_compression) + ptr = row_compress_column(tmp_ptr, &col_len, + lenlen, dict_data, dict_data_len, + prebuilt); + else + ptr = tmp_ptr; } else { /* Remove trailing spaces from old style VARCHAR columns. */ @@ -503,7 +1034,9 @@ row_mysql_store_col_in_innobase_format( } } else if (type == DATA_BLOB && row_format_col) { - ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len); + ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len, + need_compression, dict_data, dict_data_len, + prebuilt); } dfield_set_data(dfield, ptr, col_len); @@ -561,7 +1094,11 @@ row_mysql_convert_row_to_innobase( TRUE, /* MySQL row format data */ mysql_rec + templ->mysql_col_offset, templ->mysql_col_len, - dict_table_is_comp(prebuilt->table)); + dict_table_is_comp(prebuilt->table), + templ->compressed, + reinterpret_cast( + templ->zip_dict_data.str), + templ->zip_dict_data.length, prebuilt); next_column: ; } @@ -907,6 +1444,10 @@ row_prebuilt_free( mem_heap_free(prebuilt->blob_heap); } + if (prebuilt->compress_heap) { + mem_heap_free(prebuilt->compress_heap); + } + if (prebuilt->old_vers_heap) { mem_heap_free(prebuilt->old_vers_heap); } @@ -1333,6 +1874,9 @@ row_insert_for_mysql( return(DB_READ_ONLY); } + if (UNIV_LIKELY_NULL(prebuilt->compress_heap)) + mem_heap_empty(prebuilt->compress_heap); + trx->op_info = "inserting"; row_mysql_delay_if_needed(); @@ -2693,6 +3237,10 @@ row_drop_tables_for_mysql_in_background(void) return(n_tables + n_tables_dropped); } + DBUG_EXECUTE_IF("row_drop_tables_in_background_sleep", + os_thread_sleep(5000000); + ); + table = dict_table_open_on_name(drop->table_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); @@ -2703,6 +3251,16 @@ 
row_drop_tables_for_mysql_in_background(void) goto already_dropped; } + if (!table->to_be_dropped) { + /* There is a scenario: the old table is dropped + just after it's added into drop list, and new + table with the same name is created, then we try + to drop the new table in background. */ + dict_table_close(table, FALSE, FALSE); + + goto already_dropped; + } + ut_a(!table->can_be_evicted); dict_table_close(table, FALSE, FALSE); @@ -2833,6 +3391,12 @@ row_mysql_table_id_reassign( pars_info_add_ull_literal(info, "old_id", table->id); pars_info_add_ull_literal(info, "new_id", *new_id); + /* As micro-SQL does not support int4 == int8 comparisons, + old and new IDs are added again under different names as + int4 values*/ + pars_info_add_int4_literal(info, "old_id_narrow", table->id); + pars_info_add_int4_literal(info, "new_id_narrow", *new_id); + err = que_eval_sql( info, "PROCEDURE RENUMBER_TABLE_PROC () IS\n" @@ -2843,6 +3407,8 @@ row_mysql_table_id_reassign( " WHERE TABLE_ID = :old_id;\n" "UPDATE SYS_INDEXES SET TABLE_ID = :new_id\n" " WHERE TABLE_ID = :old_id;\n" + "UPDATE SYS_ZIP_DICT_COLS SET TABLE_ID = :new_id_narrow\n" + " WHERE TABLE_ID = :old_id_narrow;\n" "END;\n", FALSE, trx); return(err); @@ -3609,6 +4175,12 @@ row_truncate_table_for_mysql( pars_info_add_ull_literal(info, "old_id", table->id); pars_info_add_ull_literal(info, "new_id", new_id); + /* As micro-SQL does not support int4 == int8 comparisons, + old and new IDs are added again under different names as + int4 values*/ + pars_info_add_int4_literal(info, "old_id_narrow", table->id); + pars_info_add_int4_literal(info, "new_id_narrow", new_id); + err = que_eval_sql(info, "PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n" "BEGIN\n" @@ -3620,6 +4192,9 @@ row_truncate_table_for_mysql( "UPDATE SYS_INDEXES" " SET TABLE_ID = :new_id, SPACE = :new_space\n" " WHERE TABLE_ID = :old_id;\n" + "UPDATE SYS_ZIP_DICT_COLS\n" + " SET TABLE_ID = :new_id_narrow\n" + " WHERE TABLE_ID = :old_id_narrow;\n" "END;\n" , FALSE, 
trx); @@ -3962,6 +4537,13 @@ row_drop_table_for_mysql( } } + + DBUG_EXECUTE_IF("row_drop_table_add_to_background", + row_add_table_to_background_drop_list(table->name); + err = DB_SUCCESS; + goto funct_exit; + ); + /* TODO: could we replace the counter n_foreign_key_checks_running with lock checks on the table? Acquire here an exclusive lock on the table, and rewrite lock0lock.cc and the lock wait in srv0srv.cc so that @@ -4232,6 +4814,19 @@ row_drop_table_for_mysql( filepath = fil_make_ibd_name(tablename, false); } + /* Remove all compression dictionary references for the + table */ + err = dict_create_remove_zip_dict_references_for_table( + table->id, trx); + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, "Error: (%s) not " + "able to remove compression dictionary " + "references for table %s", ut_strerr(err), + tablename); + + goto funct_exit; + } + if (dict_table_has_fts_index(table) || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) { ut_ad(table->n_ref_count == 0); @@ -4578,6 +5173,19 @@ row_drop_database_for_mysql( row_mysql_lock_data_dictionary(trx); while ((table_name = dict_get_first_table_name_in_db(name))) { + /* Drop parent table if it is a fts aux table, to + avoid accessing dropped fts aux tables in information + scheam when parent table still exists. + Note: Drop parent table will drop fts aux tables. 
*/ + char* parent_table_name; + parent_table_name = fts_get_parent_table_name( + table_name, strlen(table_name)); + + if (parent_table_name != NULL) { + mem_free(table_name); + table_name = parent_table_name; + } + ut_a(memcmp(table_name, name, namelen) == 0); table = dict_table_open_on_name( diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 74579687a9b5e..d2821abdc2ed4 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -2460,9 +2460,11 @@ row_sel_convert_mysql_key_to_innobase( if (UNIV_LIKELY(!is_null)) { buf = row_mysql_store_col_in_innobase_format( dfield, buf, - FALSE, /* MySQL key value format col */ + /* MySQL key value format col */ + FALSE, key_ptr + data_offset, data_len, - dict_table_is_comp(index->table)); + dict_table_is_comp(index->table), + false, 0, 0 ,0); ut_a(buf <= original_buf + buf_len); } @@ -2555,12 +2557,16 @@ row_sel_store_row_id_to_prebuilt( #ifdef UNIV_DEBUG /** Convert a non-SQL-NULL field from Innobase format to MySQL format. */ -# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \ - row_sel_field_store_in_mysql_format_func(dest,templ,idx,field,src,len) +# define row_sel_field_store_in_mysql_format( \ + dest,templ,idx,field,src,len,prebuilt) \ + row_sel_field_store_in_mysql_format_func \ + (dest,templ,idx,field,src,len, prebuilt) #else /* UNIV_DEBUG */ /** Convert a non-SQL-NULL field from Innobase format to MySQL format. 
*/ -# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \ - row_sel_field_store_in_mysql_format_func(dest,templ,src,len) +# define row_sel_field_store_in_mysql_format( \ + dest,templ,idx,field,src,len,prebuilt) \ + row_sel_field_store_in_mysql_format_func \ + (dest,templ,src,len, prebuilt) #endif /* UNIV_DEBUG */ /**************************************************************//** @@ -2590,7 +2596,10 @@ row_sel_field_store_in_mysql_format_func( templ->icp_rec_field_no */ #endif /* UNIV_DEBUG */ const byte* data, /*!< in: data to store */ - ulint len) /*!< in: length of the data */ + ulint len, /*!< in: length of the data */ + row_prebuilt_t* prebuilt) + /*!< in: use prebuilt->compress_heap + only here */ { byte* ptr; #ifdef UNIV_DEBUG @@ -2634,6 +2643,15 @@ row_sel_field_store_in_mysql_format_func( field_end = dest + templ->mysql_col_len; if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + /* If this is a compressed column, + decompress it first */ + if (templ->compressed) + data = row_decompress_column(data, &len, + reinterpret_cast( + templ->zip_dict_data.str), + templ->zip_dict_data.length, + prebuilt); + /* This is a >= 5.0.3 type true VARCHAR. Store the length of the data to the first byte or the first two bytes of dest. 
*/ @@ -2684,7 +2702,11 @@ row_sel_field_store_in_mysql_format_func( already copied to the buffer in row_sel_store_mysql_rec */ row_mysql_store_blob_ref(dest, templ->mysql_col_len, data, - len); + len, templ->compressed, + reinterpret_cast( + templ->zip_dict_data.str), + templ->zip_dict_data.length, + prebuilt); break; case DATA_MYSQL: @@ -2837,7 +2859,7 @@ row_sel_store_mysql_field_func( row_sel_field_store_in_mysql_format( mysql_rec + templ->mysql_col_offset, - templ, index, field_no, data, len); + templ, index, field_no, data, len, prebuilt); if (heap != prebuilt->blob_heap) { mem_heap_free(heap); @@ -2887,7 +2909,7 @@ row_sel_store_mysql_field_func( row_sel_field_store_in_mysql_format( mysql_rec + templ->mysql_col_offset, - templ, index, field_no, data, len); + templ, index, field_no, data, len, prebuilt); } ut_ad(len != UNIV_SQL_NULL); @@ -2935,6 +2957,9 @@ row_sel_store_mysql_rec( prebuilt->blob_heap = NULL; } + if (UNIV_LIKELY_NULL(prebuilt->compress_heap)) + mem_heap_empty(prebuilt->compress_heap); + for (i = 0; i < prebuilt->n_template; i++) { const mysql_row_templ_t*templ = &prebuilt->mysql_template[i]; const ulint field_no diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 930694ac0af23..8f1d341ad1b2c 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -2714,6 +2714,12 @@ innobase_start_or_create_for_mysql(void) return(err); } + /* Create the SYS_ZIP_DICT system table */ + err = dict_create_or_check_sys_zip_dict(); + if (err != DB_SUCCESS) { + return(err); + } + srv_is_being_started = FALSE; ut_a(trx_purge_state() == PURGE_STATE_INIT); From d9787aa29af3e77c5cd04defe0331c721542cff6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2016 17:03:23 +0200 Subject: [PATCH 30/44] 5.6.33-79.0 --- storage/tokudb/CMakeLists.txt | 2 +- .../tokudb/PerconaFT/buildheader/make_tdb.cc | 3 + .../ft/cachetable/cachetable-internal.h | 2 + .../PerconaFT/ft/cachetable/cachetable.cc | 16 ++ 
.../PerconaFT/ft/cachetable/cachetable.h | 6 + storage/tokudb/PerconaFT/ft/ft-ops.cc | 151 ++++++++++++++- storage/tokudb/PerconaFT/ft/ft-ops.h | 2 + storage/tokudb/PerconaFT/ft/ft.cc | 14 +- storage/tokudb/PerconaFT/ft/ft.h | 6 + .../tokudb/PerconaFT/ft/logger/logformat.cc | 9 + storage/tokudb/PerconaFT/ft/logger/recover.cc | 78 ++++++++ .../PerconaFT/ft/serialize/rbtree_mhs.h | 16 +- .../test-rbtree-insert-remove-without-mhs.cc | 7 +- storage/tokudb/PerconaFT/ft/txn/roll.cc | 118 +++++++++++- storage/tokudb/PerconaFT/portability/file.cc | 6 + .../tokudb/PerconaFT/portability/memory.cc | 9 + storage/tokudb/PerconaFT/portability/memory.h | 4 +- .../PerconaFT/portability/toku_portability.h | 2 + .../tokudb/PerconaFT/src/tests/CMakeLists.txt | 42 ++-- .../src/tests/recovery_fileops_unit.cc | 155 ++++++++------- storage/tokudb/PerconaFT/src/ydb-internal.h | 3 +- storage/tokudb/PerconaFT/src/ydb.cc | 50 ++++- storage/tokudb/PerconaFT/src/ydb_db.cc | 99 +++++++--- storage/tokudb/PerconaFT/src/ydb_db.h | 16 ++ storage/tokudb/hatoku_hton.cc | 1 + .../r/dir-per-db-with-custom-data-dir.result | 10 + .../mysql-test/tokudb/r/dir_per_db.result | 180 ++++++++++++++++++ .../r/i_s_tokudb_lock_waits_released.result | 12 ++ .../mysql-test/tokudb/r/row_format.result | 51 +++++ ...dir-per-db-with-custom-data-dir-master.opt | 1 + .../t/dir-per-db-with-custom-data-dir.test | 16 ++ .../mysql-test/tokudb/t/dir_per_db.test | 76 ++++++++ .../tokudb/t/dir_per_db_show_table_files.inc | 9 + .../t/i_s_tokudb_lock_waits_released.test | 29 ++- .../mysql-test/tokudb/t/row_format.test | 41 ++++ .../mysql-test/tokudb_bugs/r/db938.result | 1 + .../mysql-test/tokudb_bugs/t/db938.test | 3 + .../t/partition_debug_sync_tokudb.test | 4 +- storage/tokudb/tokudb_sysvars.cc | 14 ++ storage/tokudb/tokudb_sysvars.h | 1 + 40 files changed, 1105 insertions(+), 160 deletions(-) create mode 100644 storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result create mode 100644 
storage/tokudb/mysql-test/tokudb/r/dir_per_db.result create mode 100644 storage/tokudb/mysql-test/tokudb/r/row_format.result create mode 100644 storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt create mode 100644 storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test create mode 100644 storage/tokudb/mysql-test/tokudb/t/dir_per_db.test create mode 100644 storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc create mode 100644 storage/tokudb/mysql-test/tokudb/t/row_format.test diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index fbb02582f4da6..ad30e6d40eb4f 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(TOKUDB_VERSION 5.6.32-78.1) +SET(TOKUDB_VERSION 5.6.33-79.0) # PerconaFT only supports x86-64 and cmake-2.8.9+ IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT CMAKE_VERSION VERSION_LESS "2.8.9") diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc index 576f902f6aee8..7ede78b3c0db3 100644 --- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc +++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc @@ -422,6 +422,9 @@ static void print_db_env_struct (void) { "int (*set_checkpoint_pool_threads)(DB_ENV *, uint32_t)", "void (*set_check_thp)(DB_ENV *, bool new_val)", "bool (*get_check_thp)(DB_ENV *)", + "bool (*set_dir_per_db)(DB_ENV *, bool new_val)", + "bool (*get_dir_per_db)(DB_ENV *)", + "const char *(*get_data_dir)(DB_ENV *env)", NULL}; sort_and_dump_fields("db_env", true, extra); diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h index dc6aec9226d8f..05fb771de0871 100644 --- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h +++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable-internal.h @@ -138,6 +138,8 @@ struct cachefile { // nor attempt to open any cachefile with 
the same fname (dname) // until this cachefile has been fully closed and unlinked. bool unlink_on_close; + // If set then fclose will not be logged in recovery log. + bool skip_log_recover_on_close; int fd; /* Bug: If a file is opened read-only, then it is stuck in read-only. If it is opened read-write, then subsequent writers can write to it too. */ CACHETABLE cachetable; struct fileid fileid; diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc index 5bba977de1a33..6d753805fa9fa 100644 --- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc +++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc @@ -467,6 +467,10 @@ toku_cachefile_fname_in_env (CACHEFILE cf) { return cf->fname_in_env; } +void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env) { + cf->fname_in_env = new_fname_in_env; +} + int toku_cachefile_get_fd (CACHEFILE cf) { return cf->fd; @@ -2903,6 +2907,18 @@ bool toku_cachefile_is_unlink_on_close(CACHEFILE cf) { return cf->unlink_on_close; } +void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf) { + cf->skip_log_recover_on_close = true; +} + +void toku_cachefile_do_log_recover_on_close(CACHEFILE cf) { + cf->skip_log_recover_on_close = false; +} + +bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf) { + return cf->skip_log_recover_on_close; +} + uint64_t toku_cachefile_size(CACHEFILE cf) { int64_t file_size; int fd = toku_cachefile_get_fd(cf); diff --git a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h index 148326562ab81..3b3cb0a2d4691 100644 --- a/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h +++ b/storage/tokudb/PerconaFT/ft/cachetable/cachetable.h @@ -500,12 +500,18 @@ int toku_cachefile_get_fd (CACHEFILE); // Return the filename char * toku_cachefile_fname_in_env (CACHEFILE cf); +void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env); + // Make it so when the 
cachefile closes, the underlying file is unlinked void toku_cachefile_unlink_on_close(CACHEFILE cf); // is this cachefile marked as unlink on close? bool toku_cachefile_is_unlink_on_close(CACHEFILE cf); +void toku_cachefile_skip_log_recover_on_close(CACHEFILE cf); +void toku_cachefile_do_log_recover_on_close(CACHEFILE cf); +bool toku_cachefile_is_skip_log_recover_on_close(CACHEFILE cf); + // Return the logger associated with the cachefile struct tokulogger *toku_cachefile_logger(CACHEFILE cf); diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc index f131668889e9e..30a8710d7aa1e 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.cc +++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc @@ -149,22 +149,23 @@ basement nodes, bulk fetch, and partial fetch: #include "ft/cachetable/checkpoint.h" #include "ft/cursor.h" -#include "ft/ft.h" #include "ft/ft-cachetable-wrappers.h" #include "ft/ft-flusher.h" #include "ft/ft-internal.h" -#include "ft/msg.h" +#include "ft/ft.h" #include "ft/leafentry.h" #include "ft/logger/log-internal.h" +#include "ft/msg.h" #include "ft/node.h" #include "ft/serialize/block_table.h" -#include "ft/serialize/sub_block.h" #include "ft/serialize/ft-serialize.h" #include "ft/serialize/ft_layout_version.h" #include "ft/serialize/ft_node-serialize.h" +#include "ft/serialize/sub_block.h" #include "ft/txn/txn_manager.h" -#include "ft/ule.h" #include "ft/txn/xids.h" +#include "ft/ule.h" +#include "src/ydb-internal.h" #include @@ -179,6 +180,7 @@ basement nodes, bulk fetch, and partial fetch: #include +#include /* Status is intended for display to humans to help understand system behavior. * It does not need to be perfectly thread-safe. 
*/ @@ -2593,12 +2595,104 @@ static inline int ft_open_maybe_direct(const char *filename, int oflag, int mode static const mode_t file_mode = S_IRUSR+S_IWUSR+S_IRGRP+S_IWGRP+S_IROTH+S_IWOTH; +inline bool toku_file_is_root(const char *path, const char *last_slash) { + return last_slash == path; +} + +static std::unique_ptr toku_file_get_parent_dir( + const char *path) { + std::unique_ptr result(nullptr, &toku_free); + + bool has_trailing_slash = false; + + /* Find the offset of the last slash */ + const char *last_slash = strrchr(path, OS_PATH_SEPARATOR); + + if (!last_slash) { + /* No slash in the path, return NULL */ + return result; + } + + /* Ok, there is a slash. Is there anything after it? */ + if (static_cast(last_slash - path + 1) == strlen(path)) { + has_trailing_slash = true; + } + + /* Reduce repetative slashes. */ + while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) { + last_slash--; + } + + /* Check for the root of a drive. */ + if (toku_file_is_root(path, last_slash)) { + return result; + } + + /* If a trailing slash prevented the first strrchr() from trimming + the last component of the path, trim that component now. */ + if (has_trailing_slash) { + /* Back up to the previous slash. */ + last_slash--; + while (last_slash > path && last_slash[0] != OS_PATH_SEPARATOR) { + last_slash--; + } + + /* Reduce repetative slashes. */ + while (last_slash > path && last_slash[-1] == OS_PATH_SEPARATOR) { + last_slash--; + } + } + + /* Check for the root of a drive. 
*/ + if (toku_file_is_root(path, last_slash)) { + return result; + } + + result.reset(toku_strndup(path, last_slash - path)); + return result; +} + +static bool toku_create_subdirs_if_needed(const char *path) { + static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | + S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH; + + toku_struct_stat stat; + bool subdir_exists = true; + auto subdir = toku_file_get_parent_dir(path); + + if (!subdir.get()) + return true; + + if (toku_stat(subdir.get(), &stat) == -1) { + if (ENOENT == get_error_errno()) + subdir_exists = false; + else + return false; + } + + if (subdir_exists) { + if (!S_ISDIR(stat.st_mode)) + return false; + return true; + } + + if (!toku_create_subdirs_if_needed(subdir.get())) + return false; + + if (toku_os_mkdir(subdir.get(), dir_mode)) + return false; + + return true; +} + // open a file for use by the ft // Requires: File does not exist. static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) { int r; int fd; int er; + if (!toku_create_subdirs_if_needed(fname)) + return get_error_errno(); fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, file_mode); assert(fd==-1); if ((er = get_maybe_error_errno()) != ENOENT) { @@ -4427,6 +4521,55 @@ void toku_ft_unlink(FT_HANDLE handle) { toku_cachefile_unlink_on_close(cf); } +int toku_ft_rename_iname(DB_TXN *txn, + const char *data_dir, + const char *old_iname, + const char *new_iname, + CACHETABLE ct) { + int r = 0; + + std::unique_ptr new_iname_full(nullptr, + &toku_free); + std::unique_ptr old_iname_full(nullptr, + &toku_free); + + new_iname_full.reset(toku_construct_full_name(2, data_dir, new_iname)); + old_iname_full.reset(toku_construct_full_name(2, data_dir, old_iname)); + + if (txn) { + BYTESTRING bs_old_name = {static_cast(strlen(old_iname) + 1), + const_cast(old_iname)}; + BYTESTRING bs_new_name = {static_cast(strlen(new_iname) + 1), + const_cast(new_iname)}; + FILENUM filenum = FILENUM_NONE; + { + CACHEFILE cf; + r = 
toku_cachefile_of_iname_in_env(ct, old_iname, &cf); + if (r != ENOENT) { + char *old_fname_in_cf = toku_cachefile_fname_in_env(cf); + toku_cachefile_set_fname_in_env(cf, toku_xstrdup(new_iname)); + toku_free(old_fname_in_cf); + filenum = toku_cachefile_filenum(cf); + } + } + toku_logger_save_rollback_frename( + db_txn_struct_i(txn)->tokutxn, &bs_old_name, &bs_new_name); + toku_log_frename(db_txn_struct_i(txn)->tokutxn->logger, + (LSN *)0, + 0, + toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn), + bs_old_name, + filenum, + bs_new_name); + } + + r = toku_os_rename(old_iname_full.get(), new_iname_full.get()); + if (r != 0) + return r; + r = toku_fsync_directory(new_iname_full.get()); + return r; +} + int toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) { int fd = toku_cachefile_get_fd(ft_handle->ft->cf); toku_ft_lock(ft_handle->ft); diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h index 313a74628ea17..70cf045d43c1c 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.h +++ b/storage/tokudb/PerconaFT/ft/ft-ops.h @@ -48,6 +48,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#include "ft/msg.h" #include "util/dbt.h" +#define OS_PATH_SEPARATOR '/' + typedef struct ft_handle *FT_HANDLE; int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int nodesize, int basementnodesize, enum toku_compression_method compression_method, CACHETABLE, TOKUTXN, int(*)(DB *,const DBT*,const DBT*)) __attribute__ ((warn_unused_result)); diff --git a/storage/tokudb/PerconaFT/ft/ft.cc b/storage/tokudb/PerconaFT/ft/ft.cc index 699fcc5760375..7c94b4c59d345 100644 --- a/storage/tokudb/PerconaFT/ft/ft.cc +++ b/storage/tokudb/PerconaFT/ft/ft.cc @@ -253,7 +253,19 @@ static void ft_close(CACHEFILE cachefile, int fd, void *header_v, bool oplsn_val char* fname_in_env = toku_cachefile_fname_in_env(cachefile); assert(fname_in_env); BYTESTRING bs = {.len=(uint32_t) strlen(fname_in_env), .data=fname_in_env}; - toku_log_fclose(logger, &lsn, ft->h->dirty, bs, toku_cachefile_filenum(cachefile)); // flush the log on close (if new header is being written), otherwise it might not make it out. + if (!toku_cachefile_is_skip_log_recover_on_close(cachefile)) { + toku_log_fclose( + logger, + &lsn, + ft->h->dirty, + bs, + toku_cachefile_filenum(cachefile)); // flush the log on + // close (if new header + // is being written), + // otherwise it might + // not make it out. 
+ toku_cachefile_do_log_recover_on_close(cachefile); + } } } if (ft->h->dirty) { // this is the only place this bit is tested (in currentheader) diff --git a/storage/tokudb/PerconaFT/ft/ft.h b/storage/tokudb/PerconaFT/ft/ft.h index d600e093bdcf0..7a3c4fa783cb9 100644 --- a/storage/tokudb/PerconaFT/ft/ft.h +++ b/storage/tokudb/PerconaFT/ft/ft.h @@ -53,6 +53,12 @@ typedef struct ft_options *FT_OPTIONS; void toku_ft_unlink(FT_HANDLE handle); void toku_ft_unlink_on_commit(FT_HANDLE handle, TOKUTXN txn); +int toku_ft_rename_iname(DB_TXN *txn, + const char *data_dir, + const char *old_iname, + const char *new_iname, + CACHETABLE ct); + void toku_ft_init_reflock(FT ft); void toku_ft_destroy_reflock(FT ft); void toku_ft_grab_reflock(FT ft); diff --git a/storage/tokudb/PerconaFT/ft/logger/logformat.cc b/storage/tokudb/PerconaFT/ft/logger/logformat.cc index 6f3baa81c86b2..49b611388038d 100644 --- a/storage/tokudb/PerconaFT/ft/logger/logformat.cc +++ b/storage/tokudb/PerconaFT/ft/logger/logformat.cc @@ -90,6 +90,10 @@ const struct logtype rollbacks[] = { {"fcreate", 'F', FA{{"FILENUM", "filenum", 0}, {"BYTESTRING", "iname", 0}, NULLFIELD}, LOG_BEGIN_ACTION_NA}, + //rename file + {"frename", 'n', FA{{"BYTESTRING", "old_iname", 0}, + {"BYTESTRING", "new_iname", 0}, + NULLFIELD}, LOG_BEGIN_ACTION_NA}, // cmdinsert is used to insert a key-value pair into a DB. For rollback we don't need the data. 
{"cmdinsert", 'i', FA{ {"FILENUM", "filenum", 0}, @@ -195,6 +199,11 @@ const struct logtype logtypes[] = { {"fdelete", 'U', FA{{"TXNID_PAIR", "xid", 0}, {"FILENUM", "filenum", 0}, NULLFIELD}, SHOULD_LOG_BEGIN}, + {"frename", 'n', FA{{"TXNID_PAIR", "xid", 0}, + {"BYTESTRING", "old_iname", 0}, + {"FILENUM", "old_filenum", 0}, + {"BYTESTRING", "new_iname", 0}, + NULLFIELD}, IGNORE_LOG_BEGIN}, {"enq_insert", 'I', FA{{"FILENUM", "filenum", 0}, {"TXNID_PAIR", "xid", 0}, {"BYTESTRING", "key", 0}, diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.cc b/storage/tokudb/PerconaFT/ft/logger/recover.cc index 38f29773bd630..a9c30c0e37a91 100644 --- a/storage/tokudb/PerconaFT/ft/logger/recover.cc +++ b/storage/tokudb/PerconaFT/ft/logger/recover.cc @@ -36,6 +36,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." +#include #include "ft/cachetable/cachetable.h" #include "ft/cachetable/checkpoint.h" #include "ft/ft.h" @@ -935,6 +936,83 @@ static int toku_recover_backward_fdelete (struct logtype_fdelete *UU(l), RECOVER return 0; } +static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) { + assert(renv); + assert(renv->env); + + toku_struct_stat stat; + const char *data_dir = renv->env->get_data_dir(renv->env); + bool old_exist = true; + bool new_exist = true; + + assert(data_dir); + + struct file_map_tuple *tuple; + + std::unique_ptr old_iname_full( + toku_construct_full_name(2, data_dir, l->old_iname.data), &toku_free); + std::unique_ptr new_iname_full( + toku_construct_full_name(2, data_dir, l->new_iname.data), &toku_free); + + if (toku_stat(old_iname_full.get(), &stat) == -1) { + if (ENOENT == errno) + old_exist = false; + else + return 1; + } + + if (toku_stat(new_iname_full.get(), &stat) == -1) { + if (ENOENT == errno) + new_exist = false; + else + return 1; + } + + // Both old and new files can exist if: + // - rename() is not completed 
+ // - fcreate was replayed during recovery + // 'Stalled cachefiles' container cachefile_list::m_stale_fileid contains + // closed but not yet evicted cachefiles and the key of this container is + // fs-dependent file id - (device id, inode number) pair. As it is supposed + // new file have not yet created during recovery process the 'stalled + // cachefile' container can contain only cache file of old file. + // To preserve the old cachefile file's id and keep it in + // 'stalled cachefiles' container the new file is removed + // and the old file is renamed. + if (old_exist && new_exist && + (toku_os_unlink(new_iname_full.get()) == -1 || + toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 || + toku_fsync_directory(old_iname_full.get()) == -1 || + toku_fsync_directory(new_iname_full.get()) == -1)) + return 1; + + if (old_exist && !new_exist && + (toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 || + toku_fsync_directory(old_iname_full.get()) == -1 || + toku_fsync_directory(new_iname_full.get()) == -1)) + return 1; + + if (file_map_find(&renv->fmap, l->old_filenum, &tuple) != DB_NOTFOUND) { + if (tuple->iname) + toku_free(tuple->iname); + tuple->iname = toku_xstrdup(l->new_iname.data); + } + + TOKUTXN txn = NULL; + toku_txnid2txn(renv->logger, l->xid, &txn); + + if (txn) + toku_logger_save_rollback_frename(txn, &l->old_iname, &l->new_iname); + + return 0; +} + +static int toku_recover_backward_frename(struct logtype_frename *UU(l), + RECOVER_ENV UU(renv)) { + // nothing + return 0; +} + static int toku_recover_enq_insert (struct logtype_enq_insert *l, RECOVER_ENV renv) { int r; TOKUTXN txn = NULL; diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h index 92f1e278e1a01..eb8c953b08c13 100644 --- a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h +++ b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h @@ -106,6 +106,7 @@ namespace MhsRbTree { static const uint64_t 
MHS_MAX_VAL = 0xffffffffffffffff; OUUInt64() : _value(0) {} OUUInt64(uint64_t s) : _value(s) {} + OUUInt64(const OUUInt64& o) : _value(o._value) {} bool operator<(const OUUInt64 &r) const { invariant(!(_value == MHS_MAX_VAL && r.ToInt() == MHS_MAX_VAL)); return _value < r.ToInt(); @@ -182,15 +183,18 @@ namespace MhsRbTree { class Node { public: - struct BlockPair { + class BlockPair { + public: OUUInt64 _offset; OUUInt64 _size; BlockPair() : _offset(0), _size(0) {} BlockPair(uint64_t o, uint64_t s) : _offset(o), _size(s) {} - BlockPair(OUUInt64 o, OUUInt64 s) : _offset(o), _size(s) {} - int operator<(const struct BlockPair &rhs) const { + BlockPair(const BlockPair &o) + : _offset(o._offset), _size(o._size) {} + + int operator<(const BlockPair &rhs) const { return _offset < rhs._offset; } int operator<(const uint64_t &o) const { return _offset < o; } @@ -203,15 +207,15 @@ namespace MhsRbTree { }; EColor _color; - struct BlockPair _hole; - struct Pair _label; + BlockPair _hole; + Pair _label; Node *_left; Node *_right; Node *_parent; Node(EColor c, Node::BlockPair h, - struct Pair lb, + Pair lb, Node *l, Node *r, Node *p) diff --git a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc index 85f29ce9813ef..cefe66335a6cb 100644 --- a/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc +++ b/storage/tokudb/PerconaFT/ft/tests/test-rbtree-insert-remove-without-mhs.cc @@ -53,9 +53,10 @@ static void generate_random_input() { std::srand(unsigned(std::time(0))); // set some values: - for (uint64_t i = 1; i < N; ++i) { - input_vector.push_back({i, 0}); - old_vector[i] = {i, 0}; + for (uint64_t i = 0; i < N; ++i) { + MhsRbTree::Node::BlockPair bp = {i+1, 0}; + input_vector.push_back(bp); + old_vector[i] = bp; } // using built-in random generator: std::random_shuffle(input_vector.begin(), input_vector.end(), myrandom); diff --git 
a/storage/tokudb/PerconaFT/ft/txn/roll.cc b/storage/tokudb/PerconaFT/ft/txn/roll.cc index 90eee1e580a80..9f3977743a049 100644 --- a/storage/tokudb/PerconaFT/ft/txn/roll.cc +++ b/storage/tokudb/PerconaFT/ft/txn/roll.cc @@ -38,13 +38,13 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. /* rollback and rollforward routines. */ - -#include "ft/ft.h" +#include #include "ft/ft-ops.h" +#include "ft/ft.h" #include "ft/log_header.h" #include "ft/logger/log-internal.h" -#include "ft/txn/xids.h" #include "ft/txn/rollback-apply.h" +#include "ft/txn/xids.h" // functionality provided by roll.c is exposed by an autogenerated // header file, logheader.h @@ -162,10 +162,122 @@ toku_rollback_fcreate (FILENUM filenum, // directory row lock for its dname) and we would not get this // far if there were other live handles. toku_cachefile_unlink_on_close(cf); + toku_cachefile_skip_log_recover_on_close(cf); done: return 0; } +int toku_commit_frename(BYTESTRING /* old_name */, + BYTESTRING /* new_iname */, + TOKUTXN /* txn */, + LSN UU(oplsn)) { + return 0; +} + +int toku_rollback_frename(BYTESTRING old_iname, + BYTESTRING new_iname, + TOKUTXN txn, + LSN UU(oplsn)) { + assert(txn); + assert(txn->logger); + assert(txn->logger->ct); + + CACHETABLE cachetable = txn->logger->ct; + + toku_struct_stat stat; + bool old_exist = true; + bool new_exist = true; + + std::unique_ptr old_iname_full( + toku_cachetable_get_fname_in_cwd(cachetable, old_iname.data), + &toku_free); + std::unique_ptr new_iname_full( + toku_cachetable_get_fname_in_cwd(cachetable, new_iname.data), + &toku_free); + + if (toku_stat(old_iname_full.get(), &stat) == -1) { + if (ENOENT == errno) + old_exist = false; + else + return 1; + } + + if (toku_stat(new_iname_full.get(), &stat) == -1) { + if (ENOENT == errno) + new_exist = false; + else + return 1; + } + + // Both old and new files can exist if: + // - rename() is not completed + // - fcreate was replayed during recovery + // 'Stalled cachefiles' 
container cachefile_list::m_stale_fileid contains + // closed but not yet evicted cachefiles and the key of this container is + // fs-dependent file id - (device id, inode number) pair. To preserve the + // new cachefile + // file's id and keep it in 'stalled cachefiles' container the old file is + // removed + // and the new file is renamed. + if (old_exist && new_exist && + (toku_os_unlink(old_iname_full.get()) == -1 || + toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 || + toku_fsync_directory(new_iname_full.get()) == -1 || + toku_fsync_directory(old_iname_full.get()) == -1)) + return 1; + + if (!old_exist && new_exist && + (toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 || + toku_fsync_directory(new_iname_full.get()) == -1 || + toku_fsync_directory(old_iname_full.get()) == -1)) + return 1; + + // it's ok if both files do not exist on recovery + if (!old_exist && !new_exist) + assert(txn->for_recovery); + + CACHEFILE cf; + int r = toku_cachefile_of_iname_in_env(cachetable, new_iname.data, &cf); + if (r != ENOENT) { + char *old_fname_in_cf = toku_cachefile_fname_in_env(cf); + toku_cachefile_set_fname_in_env(cf, toku_xstrdup(old_iname.data)); + toku_free(old_fname_in_cf); + // There is at least one case when fclose logging cause error: + // 1) start transaction + // 2) create ft 'a'(write "fcreate" in recovery log) + // 3) rename ft 'a' to 'b'(write "frename" in recovery log) + // 4) abort transaction: + // a) rollback rename ft (renames 'b' to 'a') + // b) rollback create ft (removes 'a'): + // invokes toku_cachefile_unlink_on_close - lazy unlink on file + // close, + // it just sets corresponding flag in cachefile object + // c) write "unlink" for 'a' in recovery log + // (when transaction is aborted all locks are released, + // when file lock is released the file is closed and unlinked if + // corresponding flag is set in cachefile object) + // 5) crash + // + // After this we have the following records in recovery log: + // - 
create ft 'a', + // - rename 'a' to 'b', + // - unlink 'a' + // + // On recovery: + // - create 'a' + // - rename 'a' to 'b' + // - unlink 'a' - as 'a' file does not exist we have crash on assert + // here + // + // There is no need to write "unlink" in recovery log in (4a) because + // 'a' will be removed + // on transaction rollback on recovery. + toku_cachefile_skip_log_recover_on_close(cf); + } + + return 0; +} + int find_ft_from_filenum (const FT &ft, const FILENUM &filenum); int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) { FILENUM thisfnum = toku_cachefile_filenum(ft->cf); diff --git a/storage/tokudb/PerconaFT/portability/file.cc b/storage/tokudb/PerconaFT/portability/file.cc index 5332a2dff5553..0e3efc1a12afc 100644 --- a/storage/tokudb/PerconaFT/portability/file.cc +++ b/storage/tokudb/PerconaFT/portability/file.cc @@ -356,6 +356,12 @@ toku_os_close(int fd) { // if EINTR, retry until success return r; } +int toku_os_rename(const char *old_name, const char *new_name) { + return rename(old_name, new_name); +} + +int toku_os_unlink(const char *path) { return unlink(path); } + ssize_t toku_os_read(int fd, void *buf, size_t count) { ssize_t r; diff --git a/storage/tokudb/PerconaFT/portability/memory.cc b/storage/tokudb/PerconaFT/portability/memory.cc index 2de12699c61f9..5430ff84b7059 100644 --- a/storage/tokudb/PerconaFT/portability/memory.cc +++ b/storage/tokudb/PerconaFT/portability/memory.cc @@ -313,6 +313,15 @@ toku_strdup(const char *s) { return (char *) toku_memdup(s, strlen(s)+1); } +char *toku_strndup(const char *s, size_t n) { + size_t s_size = strlen(s); + size_t bytes_to_copy = n > s_size ? 
s_size : n; + ++bytes_to_copy; + char *result = (char *)toku_memdup(s, bytes_to_copy); + result[bytes_to_copy - 1] = 0; + return result; +} + void toku_free(void *p) { if (p) { diff --git a/storage/tokudb/PerconaFT/portability/memory.h b/storage/tokudb/PerconaFT/portability/memory.h index 7780536f279ec..5ae652d39fc52 100644 --- a/storage/tokudb/PerconaFT/portability/memory.h +++ b/storage/tokudb/PerconaFT/portability/memory.h @@ -125,7 +125,9 @@ size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default") void *toku_memdup (const void *v, size_t len); /* Toku-version of strdup. Use this so that it calls toku_malloc() */ char *toku_strdup (const char *s) __attribute__((__visibility__("default"))); - +/* Toku-version of strndup. Use this so that it calls toku_malloc() */ +char *toku_strndup(const char *s, size_t n) + __attribute__((__visibility__("default"))); /* Copy memory. Analogous to strdup() Crashes instead of returning NULL */ void *toku_xmemdup (const void *v, size_t len) __attribute__((__visibility__("default"))); /* Toku-version of strdup. 
Use this so that it calls toku_xmalloc() Crashes instead of returning NULL */ diff --git a/storage/tokudb/PerconaFT/portability/toku_portability.h b/storage/tokudb/PerconaFT/portability/toku_portability.h index 921d3a309f6c0..f127b0fe172dc 100644 --- a/storage/tokudb/PerconaFT/portability/toku_portability.h +++ b/storage/tokudb/PerconaFT/portability/toku_portability.h @@ -246,6 +246,8 @@ int toku_os_open(const char *path, int oflag, int mode); int toku_os_open_direct(const char *path, int oflag, int mode); int toku_os_close(int fd); int toku_os_fclose(FILE * stream); +int toku_os_rename(const char *old_name, const char *new_name); +int toku_os_unlink(const char *path); ssize_t toku_os_read(int fd, void *buf, size_t count); ssize_t toku_os_pread(int fd, void *buf, size_t count, off_t offset); void toku_os_recursive_delete(const char *path); diff --git a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt index 47f6aa44a75e8..c01a8f0d62870 100644 --- a/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt +++ b/storage/tokudb/PerconaFT/src/tests/CMakeLists.txt @@ -108,11 +108,11 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS) foreach(ov c d r) if (ov STREQUAL c) - set(gset 0) set(hset 0) + set(iset 0) else () - set(gset 0 1 2 3 4 5) - set(hset 0 1) + set(hset 0 1 2 3 4 5) + set(iset 0 1) endif () foreach(av 0 1) @@ -130,25 +130,27 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS) foreach(dv ${dset}) foreach(ev ${eset}) foreach(fv 0 1) - foreach(gv ${gset}) + foreach(gv 0 1) foreach(hv ${hset}) - - if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv))) - set(iset 0 1) - else () - set(iset 0) - endif () - foreach(iv ${iset}) - set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") - set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") - set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors") - 
add_test(NAME ${testname} - COMMAND run_recovery_fileops_unit.sh $ ${errfile} 137 - -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} - ) - setup_toku_test_properties(${testname} ${envdir}) - set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}") + + if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv))) + set(jset 0 1) + else () + set(jset 0) + endif () + + foreach(jv ${jset}) + set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}") + set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}") + set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}${jv}.ctest-errors") + add_test(NAME ${testname} + COMMAND run_recovery_fileops_unit.sh $ ${errfile} 137 + -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} -J ${jv} + ) + setup_toku_test_properties(${testname} ${envdir}) + set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}") + endforeach(jv) endforeach(iv) endforeach(hv) endforeach(gv) diff --git a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc index 2c905c5ff122e..cc99ab560d85e 100644 --- a/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc +++ b/storage/tokudb/PerconaFT/src/tests/recovery_fileops_unit.cc @@ -36,17 +36,17 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." 
-#include "test.h" -#include "toku_pthread.h" #include -#include #include - +#include +#include "ft/logger/logger.h" +#include "test.h" +#include "toku_pthread.h" static int do_recover; static int do_crash; static char fileop; -static int choices['I'-'A'+1]; +static int choices['J' - 'A' + 1]; const int num_choices = sizeof(choices)/sizeof(choices[0]); static DB_TXN *txn; const char *oldname = "oldfoo"; @@ -58,11 +58,14 @@ static char *cmd; static void usage(void) { - fprintf(stderr, "Usage:\n%s [-v|-q]* [-h] (-c|-r) -O fileop -A# -B# -C# -D# -E# -F# [-G# -H# -I#]\n" - " fileop = c/r/d (create/rename/delete)\n" - " Where # is a single digit number > 0.\n" - " A-F are required for fileop=create\n" - " A-I are required for fileop=delete, fileop=rename\n", cmd); + fprintf(stderr, + "Usage:\n%s [-v|-q]* [-h] (-c|-r) -O fileop -A# -B# -C# -D# -E# " + "-F# -G# [-H# -I# -J#]\n" + " fileop = c/r/d (create/rename/delete)\n" + " Where # is a single digit number > 0.\n" + " A-G are required for fileop=create\n" + " A-I are required for fileop=delete, fileop=rename\n", + cmd); exit(1); } @@ -129,19 +132,18 @@ get_choice_flush_log_before_crash(void) { return get_bool_choice('F'); } -static int -get_choice_create_type(void) { - return get_x_choice('G', 6); -} +static int get_choice_dir_per_db(void) { return get_bool_choice('G'); } + +static int get_choice_create_type(void) { return get_x_choice('H', 6); } static int get_choice_txn_does_open_close_before_fileop(void) { - return get_bool_choice('H'); + return get_bool_choice('I'); } static int get_choice_lock_table_split_fcreate(void) { - int choice = get_bool_choice('I'); + int choice = get_bool_choice('J'); if (choice) assert(fileop_did_commit()); return choice; @@ -156,63 +158,65 @@ do_args(int argc, char * const argv[]) { choices[i] = -1; } - int c; - while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:X:")) != -1) { - switch(c) { - case 'v': - verbose++; - break; - case 'q': - verbose--; - if (verbose<0) verbose=0; - 
break; - case 'h': - case '?': - usage(); - break; - case 'c': - do_crash = 1; - break; - case 'r': - do_recover = 1; - break; - case 'O': - if (fileop != '\0') + char c; + while ((c = getopt(argc, argv, "vqhcrO:A:B:C:D:E:F:G:H:I:J:X:")) != -1) { + switch (c) { + case 'v': + verbose++; + break; + case 'q': + verbose--; + if (verbose < 0) + verbose = 0; + break; + case 'h': + case '?': usage(); - fileop = optarg[0]; - switch (fileop) { - case 'c': - case 'r': - case 'd': - break; - default: + break; + case 'c': + do_crash = 1; + break; + case 'r': + do_recover = 1; + break; + case 'O': + if (fileop != '\0') usage(); - break; - } - break; - case 'A': - case 'B': - case 'C': - case 'D': - case 'E': - case 'F': - case 'G': - case 'H': - case 'I': - if (fileop == '\0') - usage(); - int num; - num = atoi(optarg); - if (num < 0 || num > 9) - usage(); - choices[c - 'A'] = num; - break; - case 'X': - if (strcmp(optarg, "novalgrind") == 0) { - // provide a way for the shell script runner to pass an - // arg that suppresses valgrind on this child process + fileop = optarg[0]; + switch (fileop) { + case 'c': + case 'r': + case 'd': + break; + default: + usage(); + break; + } + break; + case 'A': + case 'B': + case 'C': + case 'D': + case 'E': + case 'F': + case 'G': + case 'H': + case 'I': + case 'J': + if (fileop == '\0') + usage(); + int num; + num = atoi(optarg); + if (num < 0 || num > 9) + usage(); + choices[c - 'A'] = num; break; - } + case 'X': + if (strcmp(optarg, "novalgrind") == 0) { + // provide a way for the shell script runner to pass an + // arg that suppresses valgrind on this child process + break; + } // otherwise, fall through to an error default: usage(); @@ -222,7 +226,7 @@ do_args(int argc, char * const argv[]) { if (argc!=optind) { usage(); exit(1); } for (i = 0; i < num_choices; i++) { - if (i >= 'G' - 'A' && fileop == 'c') + if (i >= 'H' - 'A' && fileop == 'c') break; if (choices[i] == -1) usage(); @@ -261,6 +265,8 @@ static void env_startup(void) { int 
envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE | recover_flag; r = db_env_create(&env, 0); CKERR(r); + r = env->set_dir_per_db(env, get_choice_dir_per_db()); + CKERR(r); env->set_errfile(env, stderr); r = env->open(env, TOKU_TEST_FILENAME, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); @@ -625,8 +631,11 @@ recover_and_verify(void) { else if (did_create_commit_early()) expect_old_name = 1; } - verify_file_exists(oldname, expect_old_name); - verify_file_exists(newname, expect_new_name); + // We can't expect files existence until recovery log was not flushed + if ((get_choice_flush_log_before_crash())) { + verify_file_exists(oldname, expect_old_name); + verify_file_exists(newname, expect_new_name); + } env_shutdown(); } diff --git a/storage/tokudb/PerconaFT/src/ydb-internal.h b/storage/tokudb/PerconaFT/src/ydb-internal.h index 2d6c84126e168..d40f7795b0b86 100644 --- a/storage/tokudb/PerconaFT/src/ydb-internal.h +++ b/storage/tokudb/PerconaFT/src/ydb-internal.h @@ -132,7 +132,8 @@ struct __toku_db_env_internal { int datadir_lockfd; int logdir_lockfd; int tmpdir_lockfd; - bool check_thp; // if set check if transparent huge pages are disables + bool check_thp; // if set check if transparent huge pages are disabled + bool dir_per_db; uint64_t (*get_loader_memory_size_callback)(void); uint64_t default_lock_timeout_msec; uint64_t (*get_lock_timeout_callback)(uint64_t default_lock_timeout_msec); diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc index aed271bce4069..3341f6d76c624 100644 --- a/storage/tokudb/PerconaFT/src/ydb.cc +++ b/storage/tokudb/PerconaFT/src/ydb.cc @@ -1298,6 +1298,22 @@ env_get_check_thp(DB_ENV * env) { return env->i->check_thp; } +static bool env_set_dir_per_db(DB_ENV *env, bool new_val) { + HANDLE_PANICKED_ENV(env); + bool r = env->i->dir_per_db; + env->i->dir_per_db = new_val; + return r; +} + +static bool env_get_dir_per_db(DB_ENV *env) { + HANDLE_PANICKED_ENV(env); + 
return env->i->dir_per_db; +} + +static const char *env_get_data_dir(DB_ENV *env) { + return env->i->real_data_dir; +} + static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags); static int @@ -2700,6 +2716,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) { USENV(do_backtrace); USENV(set_check_thp); USENV(get_check_thp); + USENV(set_dir_per_db); + USENV(get_dir_per_db); + USENV(get_data_dir); #undef USENV // unlocked methods @@ -3045,7 +3064,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co if (env_is_db_with_dname_open(env, newname)) { return toku_ydb_do_error(env, EINVAL, "Cannot rename dictionary; Dictionary with target name has an open handle.\n"); } - + DBT old_dname_dbt; DBT new_dname_dbt; DBT iname_dbt; @@ -3065,10 +3084,35 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co r = EEXIST; } else if (r == DB_NOTFOUND) { + DBT new_iname_dbt; + // Do not rename ft file if 'dir_per_db' option is not set + auto new_iname = + env->get_dir_per_db(env) + ? generate_iname_for_rename_or_open( + env, txn, newname, false) + : std::unique_ptr( + toku_strdup(iname), &toku_free); + toku_fill_dbt( + &new_iname_dbt, new_iname.get(), strlen(new_iname.get()) + 1); + // remove old (dname,iname) and insert (newname,iname) in directory r = toku_db_del(env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true); if (r != 0) { goto exit; } - r = toku_db_put(env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true); + + // Do not rename ft file if 'dir_per_db' option is not set + if (env->get_dir_per_db(env)) + r = toku_ft_rename_iname(txn, + env->get_data_dir(env), + iname, + new_iname.get(), + env->i->cachetable); + + r = toku_db_put(env->i->directory, + txn, + &new_dname_dbt, + &new_iname_dbt, + 0, + true); if (r != 0) { goto exit; } //Now that we have writelocks on both dnames, verify that there are still no handles open. 
(to prevent race conditions) @@ -3091,7 +3135,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co // otherwise, we're okay in marking this ft as remove on // commit. no new handles can open for this dictionary // because the txn has directory write locks on the dname - if (txn && !can_acquire_table_lock(env, txn, iname)) { + if (txn && !can_acquire_table_lock(env, txn, new_iname.get())) { r = DB_LOCK_NOTGRANTED; } // We don't do anything at the ft or cachetable layer for rename. diff --git a/storage/tokudb/PerconaFT/src/ydb_db.cc b/storage/tokudb/PerconaFT/src/ydb_db.cc index e5bd4e7d089d5..100d1bfa20b14 100644 --- a/storage/tokudb/PerconaFT/src/ydb_db.cc +++ b/storage/tokudb/PerconaFT/src/ydb_db.cc @@ -83,8 +83,7 @@ ydb_db_layer_get_status(YDB_DB_LAYER_STATUS statp) { *statp = ydb_db_layer_status; } -static void -create_iname_hint(const char *dname, char *hint) { +void create_iname_hint(const char *dname, char *hint) { //Requires: size of hint array must be > strlen(dname) //Copy alphanumeric characters only. //Replace strings of non-alphanumeric characters with a single underscore. @@ -105,11 +104,43 @@ create_iname_hint(const char *dname, char *hint) { *hint = '\0'; } +void create_iname_hint_for_dbdir(const char *dname, char *hint) { + assert(dname); + if (*dname == '.') + ++dname; + if (*dname == '/') + ++dname; + bool underscored = false; + bool dbdir_is_parsed = false; + // Do not change the first '/' because this is + // delimiter which splits name into database dir + // and table dir. 
+ while (*dname) { + if (isalnum(*dname) || (*dname == '/' && !dbdir_is_parsed)) { + char c = *dname++; + *hint++ = c; + if (c == '/') + dbdir_is_parsed = true; + underscored = false; + } else { + if (!underscored) + *hint++ = '_'; + dname++; + underscored = true; + } + } + *hint = '\0'; +} + // n < 0 means to ignore mark and ignore n // n >= 0 means to include mark ("_B_" or "_P_") with hex value of n in iname // (intended for use by loader, which will create many inames using one txnid). -static char * -create_iname(DB_ENV *env, uint64_t id1, uint64_t id2, char *hint, const char *mark, int n) { +char *create_iname(DB_ENV *env, + uint64_t id1, + uint64_t id2, + char *hint, + const char *mark, + int n) { int bytes; char inamebase[strlen(hint) + 8 + // hex file format version @@ -138,6 +169,34 @@ create_iname(DB_ENV *env, uint64_t id1, uint64_t id2, char *hint, const char *ma return rval; } +static uint64_t nontransactional_open_id = 0; + +std::unique_ptr generate_iname_for_rename_or_open( + DB_ENV *env, + DB_TXN *txn, + const char *dname, + bool is_open) { + std::unique_ptr result(nullptr, &toku_free); + char hint[strlen(dname) + 1]; + uint64_t id1 = 0; + uint64_t id2 = 0; + + if (txn) { + id1 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).parent_id64; + id2 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).child_id64; + } else if (is_open) + id1 = toku_sync_fetch_and_add(&nontransactional_open_id, 1); + + if (env->get_dir_per_db(env) && !toku_os_is_absolute_name(dname)) + create_iname_hint_for_dbdir(dname, hint); + else + create_iname_hint(dname, hint); + + result.reset(create_iname(env, id1, id2, hint, NULL, -1)); + + return result; +} + static int toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode); // Effect: Do the work required of DB->close(). 
@@ -227,8 +286,6 @@ db_open_subdb(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTY return r; } -static uint64_t nontransactional_open_id = 0; - // inames are created here. // algorithm: // begin txn @@ -286,27 +343,15 @@ toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYP toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1); toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC); r = toku_db_get(db->dbenv->i->directory, txn, &dname_dbt, &iname_dbt, DB_SERIALIZABLE); // allocates memory for iname - char *iname = (char *) iname_dbt.data; + std::unique_ptr iname( + static_cast(iname_dbt.data), &toku_free); if (r == DB_NOTFOUND && !is_db_create) { r = ENOENT; } else if (r==0 && is_db_excl) { r = EEXIST; } else if (r == DB_NOTFOUND) { - char hint[strlen(dname) + 1]; - - // create iname and make entry in directory - uint64_t id1 = 0; - uint64_t id2 = 0; - - if (txn) { - id1 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).parent_id64; - id2 = toku_txn_get_txnid(db_txn_struct_i(txn)->tokutxn).child_id64; - } else { - id1 = toku_sync_fetch_and_add(&nontransactional_open_id, 1); - } - create_iname_hint(dname, hint); - iname = create_iname(db->dbenv, id1, id2, hint, NULL, -1); // allocated memory for iname - toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1); + iname = generate_iname_for_rename_or_open(db->dbenv, txn, dname, true); + toku_fill_dbt(&iname_dbt, iname.get(), strlen(iname.get()) + 1); // // put_flags will be 0 for performance only, avoid unnecessary query // if we are creating a hot index, per #3166, we do not want the write lock in directory grabbed. 
@@ -318,16 +363,13 @@ toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYP // we now have an iname if (r == 0) { - r = toku_db_open_iname(db, txn, iname, flags, mode); + r = toku_db_open_iname(db, txn, iname.get(), flags, mode); if (r == 0) { db->i->dname = toku_xstrdup(dname); env_note_db_opened(db->dbenv, db); // tell env that a new db handle is open (using dname) } } - if (iname) { - toku_free(iname); - } return r; } @@ -1181,7 +1223,10 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new toku_fill_dbt(&dname_dbt, dname, strlen(dname)+1); // now create new iname char hint[strlen(dname) + 1]; - create_iname_hint(dname, hint); + if (env->get_dir_per_db(env) && !toku_os_is_absolute_name(dname)) + create_iname_hint_for_dbdir(dname, hint); + else + create_iname_hint(dname, hint); const char *new_iname = create_iname(env, xid.parent_id64, xid.child_id64, hint, mark, i); // allocates memory for iname_in_env new_inames_in_env[i] = new_iname; toku_fill_dbt(&iname_dbt, new_iname, strlen(new_iname) + 1); // iname_in_env goes in directory diff --git a/storage/tokudb/PerconaFT/src/ydb_db.h b/storage/tokudb/PerconaFT/src/ydb_db.h index 8b92dd1c3cb83..8be28857c142b 100644 --- a/storage/tokudb/PerconaFT/src/ydb_db.h +++ b/storage/tokudb/PerconaFT/src/ydb_db.h @@ -43,6 +43,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. 
#include "ydb-internal.h" #include "ydb_txn.h" +#include + typedef enum { YDB_LAYER_DIRECTORY_WRITE_LOCKS = 0, /* total directory write locks taken */ YDB_LAYER_DIRECTORY_WRITE_LOCKS_FAIL, /* total directory write locks unable to be taken */ @@ -119,3 +121,17 @@ toku_db_destruct_autotxn(DB_TXN *txn, int r, bool changed) { } return r; } + +void create_iname_hint_for_dbdir(const char *dname, char *hint); +void create_iname_hint(const char *dname, char *hint); +char *create_iname(DB_ENV *env, + uint64_t id1, + uint64_t id2, + char *hint, + const char *mark, + int n); +std::unique_ptr generate_iname_for_rename_or_open( + DB_ENV *env, + DB_TXN *txn, + const char *dname, + bool is_open); diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index 2b121189e8351..5e49e8d95d07a 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -543,6 +543,7 @@ static int tokudb_init_func(void *p) { db_env->change_fsync_log_period(db_env, tokudb::sysvars::fsync_log_period); db_env->set_lock_timeout_callback(db_env, tokudb_lock_timeout_callback); + db_env->set_dir_per_db(db_env, tokudb::sysvars::dir_per_db); db_env->set_loader_memory_size( db_env, diff --git a/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result b/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result new file mode 100644 index 0000000000000..a36dbcb28c042 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/dir-per-db-with-custom-data-dir.result @@ -0,0 +1,10 @@ +SELECT @@tokudb_dir_per_db; +@@tokudb_dir_per_db +1 +TOKUDB_DATA_DIR_CHANGED +1 +CREATE DATABASE tokudb_test; +USE tokudb_test; +CREATE TABLE t (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY) ENGINE=tokudb; +DROP TABLE t; +DROP DATABASE tokudb_test; diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result new file mode 100644 index 0000000000000..371f97406c8eb --- /dev/null +++ 
b/storage/tokudb/mysql-test/tokudb/r/dir_per_db.result @@ -0,0 +1,180 @@ +######## +# tokudb_dir_per_db = 1 +######## +SET GLOBAL tokudb_dir_per_db= 1; +######## +# CREATE +######## +CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; +INSERT INTO t1 SET b = 10; +INSERT INTO t1 SET b = 20; +SELECT b FROM t1 ORDER BY a; +b +10 +20 +CREATE INDEX b ON t1 (b); +CREATE INDEX ab ON t1 (a,b); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_key_ab_id.tokudb +t1_key_b_id.tokudb +t1_main_id.tokudb +t1_status_id.tokudb +######## +# RENAME +######## +RENAME TABLE t1 TO t2; +SELECT b FROM t2 ORDER BY a; +b +10 +20 +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t2_key_ab_id.tokudb +t2_key_b_id.tokudb +t2_main_id.tokudb +t2_status_id.tokudb +######## +# DROP +######## +DROP TABLE t2; +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +######## +# tokudb_dir_per_db = 0 +######## +SET GLOBAL tokudb_dir_per_db= 0; +######## +# CREATE +######## +CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; +INSERT INTO t1 SET b = 10; +INSERT INTO t1 SET b = 20; +SELECT b FROM t1 ORDER BY a; +b +10 +20 +CREATE INDEX b ON t1 (b); +CREATE INDEX ab ON t1 (a,b); +## Looking for *.tokudb files in data_dir +_test_t1_key_ab_id.tokudb +_test_t1_key_b_id.tokudb +_test_t1_main_id.tokudb +_test_t1_status_id.tokudb +## Looking for *.tokudb files in data_dir/test +######## +# RENAME +######## +RENAME TABLE t1 TO t2; +SELECT b FROM t2 ORDER BY a; +b +10 +20 +## Looking for *.tokudb files in data_dir +_test_t1_key_ab_id.tokudb +_test_t1_key_b_id.tokudb +_test_t1_main_id.tokudb +_test_t1_status_id.tokudb +## Looking for *.tokudb files in data_dir/test +######## +# DROP +######## +DROP TABLE t2; +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in 
data_dir/test +######## +# CREATE on tokudb_dir_per_db = 0 and RENAME on tokudb_dir_per_db = 1 and vice versa +######## +######## +# tokudb_dir_per_db = (1 - 1); +######## +SET GLOBAL tokudb_dir_per_db= (1 - 1);; +######## +# CREATE +######## +CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; +INSERT INTO t1 SET b = 10; +INSERT INTO t1 SET b = 20; +SELECT b FROM t1 ORDER BY a; +b +10 +20 +CREATE INDEX b ON t1 (b); +CREATE INDEX ab ON t1 (a,b); +## Looking for *.tokudb files in data_dir +_test_t1_key_ab_id.tokudb +_test_t1_key_b_id.tokudb +_test_t1_main_id.tokudb +_test_t1_status_id.tokudb +## Looking for *.tokudb files in data_dir/test +######## +# tokudb_dir_per_db = 1 +######## +SET GLOBAL tokudb_dir_per_db= 1; +######## +# RENAME +######## +RENAME TABLE t1 TO t2; +SELECT b FROM t2 ORDER BY a; +b +10 +20 +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t2_key_ab_id.tokudb +t2_key_b_id.tokudb +t2_main_id.tokudb +t2_status_id.tokudb +######## +# DROP +######## +DROP TABLE t2; +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +######## +# tokudb_dir_per_db = (1 - 0); +######## +SET GLOBAL tokudb_dir_per_db= (1 - 0);; +######## +# CREATE +######## +CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; +INSERT INTO t1 SET b = 10; +INSERT INTO t1 SET b = 20; +SELECT b FROM t1 ORDER BY a; +b +10 +20 +CREATE INDEX b ON t1 (b); +CREATE INDEX ab ON t1 (a,b); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_key_ab_id.tokudb +t1_key_b_id.tokudb +t1_main_id.tokudb +t1_status_id.tokudb +######## +# tokudb_dir_per_db = 0 +######## +SET GLOBAL tokudb_dir_per_db= 0; +######## +# RENAME +######## +RENAME TABLE t1 TO t2; +SELECT b FROM t2 ORDER BY a; +b +10 +20 +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test 
+t1_key_ab_id.tokudb +t1_key_b_id.tokudb +t1_main_id.tokudb +t1_status_id.tokudb +######## +# DROP +######## +DROP TABLE t2; +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result index 6f9592ddc1fc0..ecd4d07720611 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result @@ -2,6 +2,7 @@ set default_storage_engine='tokudb'; set tokudb_prelock_empty=false; drop table if exists t; create table t (id int primary key); +t should be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; @@ -15,17 +16,21 @@ insert into t values (1); set autocommit=0; set tokudb_lock_timeout=600000; insert into t values (1); +should find the presence of a lock on 1st transaction select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main +should find the presence of a lock_wait on the 2nd transaction select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main +should find the presence of two transactions select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; trx_id trx_mysql_thread_id TRX_ID MYSQL_ID TRX_ID MYSQL_ID commit; +verify that the lock on the 1st transaction is 
released and replaced by the lock for the 2nd transaction select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main @@ -33,6 +38,8 @@ select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name ERROR 23000: Duplicate entry '1' for key 'PRIMARY' commit; +verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes +verify that the lock on the 2nd transaction has been released, should be be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; @@ -46,23 +53,28 @@ replace into t values (1); set autocommit=0; set tokudb_lock_timeout=600000; replace into t values (1); +should find the presence of a lock on 1st transaction select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main +should find the presence of a lock_wait on the 2nd transaction select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main +should find the presence of two transactions select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; trx_id trx_mysql_thread_id TRX_ID MYSQL_ID TRX_ID MYSQL_ID commit; 
+verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction select * from information_schema.tokudb_locks; locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name commit; +verify that the lock on the 2nd transaction has been released, should be be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; diff --git a/storage/tokudb/mysql-test/tokudb/r/row_format.result b/storage/tokudb/mysql-test/tokudb/r/row_format.result new file mode 100644 index 0000000000000..cb66914844562 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/row_format.result @@ -0,0 +1,51 @@ +CREATE TABLE tokudb_row_format_test_1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT; +CREATE TABLE tokudb_row_format_test_2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST; +CREATE TABLE tokudb_row_format_test_3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL; +CREATE TABLE tokudb_row_format_test_4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED; +CREATE TABLE tokudb_row_format_test_5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB; +CREATE TABLE tokudb_row_format_test_6 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA; +CREATE TABLE tokudb_row_format_test_7 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ; +CREATE TABLE tokudb_row_format_test_8 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name like 'tokudb_row_format_test%' ORDER BY table_name; +table_name row_format engine +tokudb_row_format_test_1 
tokudb_zlib TokuDB +tokudb_row_format_test_2 tokudb_quicklz TokuDB +tokudb_row_format_test_3 tokudb_lzma TokuDB +tokudb_row_format_test_4 tokudb_uncompressed TokuDB +tokudb_row_format_test_5 tokudb_zlib TokuDB +tokudb_row_format_test_6 tokudb_lzma TokuDB +tokudb_row_format_test_7 tokudb_quicklz TokuDB +tokudb_row_format_test_8 tokudb_snappy TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_quicklz TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_lzma TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_uncompressed TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_zlib TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_snappy TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_quicklz TokuDB +ALTER 
TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_lzma TokuDB +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; +table_name row_format engine +tokudb_row_format_test_1 tokudb_zlib TokuDB +DROP TABLE tokudb_row_format_test_1, tokudb_row_format_test_2, tokudb_row_format_test_3, tokudb_row_format_test_4, tokudb_row_format_test_5, tokudb_row_format_test_6, tokudb_row_format_test_7, tokudb_row_format_test_8; diff --git a/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt new file mode 100644 index 0000000000000..a9090f4d1157e --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir-master.opt @@ -0,0 +1 @@ +--loose-tokudb_data_dir="$MYSQL_TMP_DIR" --loose-tokudb-dir-per-db=1 diff --git a/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test new file mode 100644 index 0000000000000..7f415a7251595 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir-per-db-with-custom-data-dir.test @@ -0,0 +1,16 @@ +--source include/have_tokudb.inc + +SELECT @@tokudb_dir_per_db; + +--disable_query_log +--eval SELECT STRCMP(@@tokudb_data_dir, '$MYSQL_TMP_DIR') = 0 AS TOKUDB_DATA_DIR_CHANGED +--enable_query_log + +CREATE DATABASE tokudb_test; +USE tokudb_test; +CREATE TABLE t (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY) ENGINE=tokudb; + +--file_exists $MYSQL_TMP_DIR/tokudb_test + +DROP TABLE t; +DROP DATABASE tokudb_test; diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test 
b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test new file mode 100644 index 0000000000000..b638b706d8719 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db.test @@ -0,0 +1,76 @@ +source include/have_tokudb.inc; + +--let $DB= test +--let $DATADIR= `select @@datadir` +--let $i= 2 + +while ($i) { + --dec $i + --echo ######## + --echo # tokudb_dir_per_db = $i + --echo ######## + --eval SET GLOBAL tokudb_dir_per_db= $i + --echo ######## + --echo # CREATE + --echo ######## + CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; + INSERT INTO t1 SET b = 10; + INSERT INTO t1 SET b = 20; + SELECT b FROM t1 ORDER BY a; + CREATE INDEX b ON t1 (b); + CREATE INDEX ab ON t1 (a,b); + --source dir_per_db_show_table_files.inc + --echo ######## + --echo # RENAME + --echo ######## + RENAME TABLE t1 TO t2; + SELECT b FROM t2 ORDER BY a; + --source dir_per_db_show_table_files.inc + --echo ######## + --echo # DROP + --echo ######## + DROP TABLE t2; + --source dir_per_db_show_table_files.inc +} + +--echo ######## +--echo # CREATE on tokudb_dir_per_db = 0 and RENAME on tokudb_dir_per_db = 1 and vice versa +--echo ######## + +--let $i= 2 + +while ($i) { + --dec $i + --let $inv_i= (1 - $i); + --echo ######## + --echo # tokudb_dir_per_db = $inv_i + --echo ######## + --eval SET GLOBAL tokudb_dir_per_db= $inv_i + --echo ######## + --echo # CREATE + --echo ######## + CREATE TABLE t1 (a INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, b INT(10) UNSIGNED NOT NULL) ENGINE=tokudb; + INSERT INTO t1 SET b = 10; + INSERT INTO t1 SET b = 20; + SELECT b FROM t1 ORDER BY a; + CREATE INDEX b ON t1 (b); + CREATE INDEX ab ON t1 (a,b); + --source dir_per_db_show_table_files.inc + --echo ######## + --echo # tokudb_dir_per_db = $i + --echo ######## + --eval SET GLOBAL tokudb_dir_per_db= $i + --echo ######## + --echo # RENAME + --echo ######## + RENAME TABLE t1 TO t2; + SELECT b FROM t2 ORDER BY a; + --source dir_per_db_show_table_files.inc + --echo 
######## + --echo # DROP + --echo ######## + DROP TABLE t2; + --source dir_per_db_show_table_files.inc +} + +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc new file mode 100644 index 0000000000000..bdf7d5b235ff9 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_show_table_files.inc @@ -0,0 +1,9 @@ +--sorted_result + +--echo ## Looking for *.tokudb files in data_dir +--source include/table_files_replace_pattern.inc +--list_files $DATADIR *.tokudb + +--echo ## Looking for *.tokudb files in data_dir/$DB +--source include/table_files_replace_pattern.inc +--list_files $DATADIR/$DB/ *.tokudb diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test index 6488f27cfbb63..924b11e29d665 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test @@ -12,7 +12,7 @@ create table t (id int primary key); # verify that txn_a insert (1) blocks txn_b insert (1) and txn_b gets a duplicate key error -# should be empty +--echo t should be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; @@ -28,7 +28,7 @@ set autocommit=0; set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes send insert into t values (1); -# should find the presence of a lock on 1st transaction +--echo should find the presence of a lock on 1st transaction connection default; let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update'; source include/wait_condition.inc; @@ -37,17 +37,17 @@ real_sleep 1; # delay a little to shorten the update -> write row -> 
lock wait r replace_column 1 TRX_ID 2 MYSQL_ID; select * from information_schema.tokudb_locks; -# should find the presence of a lock_wait on the 2nd transaction +--echo should find the presence of a lock_wait on the 2nd transaction replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME; select * from information_schema.tokudb_lock_waits; -# should find the presence of two transactions +--echo should find the presence of two transactions replace_column 1 TRX_ID 2 MYSQL_ID; select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; connection conn_a; commit; -# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction +--echo verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main'; source include/wait_condition.inc; @@ -64,10 +64,8 @@ connection default; disconnect conn_a; disconnect conn_b; -# verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes - -# verify that the lock on the 2nd transaction has been released -# should be be empty +--echo verify that txn_a replace (1) blocks txn_b replace (1) and txn_b eventually gets the lock on (1) and completes +--echo verify that the lock on the 2nd transaction has been released, should be be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; @@ -83,7 +81,7 @@ set autocommit=0; set tokudb_lock_timeout=600000; # set lock wait timeout to 10 minutes send replace into t values (1); -# should find the presence of a lock on 1st transaction +--echo should find the presence of a lock on 1st transaction connection default; let $wait_condition= select count(*)=1 from information_schema.processlist where info='replace into t 
values (1)' and state='update'; source include/wait_condition.inc; @@ -92,17 +90,19 @@ real_sleep 1; # delay a little to shorten the update -> write row -> lock wait r replace_column 1 TRX_ID 2 MYSQL_ID; select * from information_schema.tokudb_locks; -# should find the presence of a lock_wait on the 2nd transaction +--echo should find the presence of a lock_wait on the 2nd transaction replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME; select * from information_schema.tokudb_lock_waits; -# should find the presence of two transactions +--echo should find the presence of two transactions replace_column 1 TRX_ID 2 MYSQL_ID; select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; connection conn_a; commit; -# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction +--echo verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction +let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main'; +source include/wait_condition.inc; replace_column 1 TRX_ID 2 MYSQL_ID; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; @@ -115,8 +115,7 @@ connection default; disconnect conn_a; disconnect conn_b; -# verify that the lock on the 2nd transaction has been released -# should be be empty +--echo verify that the lock on the 2nd transaction has been released, should be be empty select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; diff --git a/storage/tokudb/mysql-test/tokudb/t/row_format.test b/storage/tokudb/mysql-test/tokudb/t/row_format.test new file mode 100644 index 0000000000000..6533f8c06be95 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/row_format.test @@ -0,0 +1,41 @@ +# +# Test TokuDB compression option additions to row_format +# 
+--source include/have_tokudb.inc + +CREATE TABLE tokudb_row_format_test_1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT; +CREATE TABLE tokudb_row_format_test_2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST; +CREATE TABLE tokudb_row_format_test_3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL; +CREATE TABLE tokudb_row_format_test_4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED; +CREATE TABLE tokudb_row_format_test_5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB; +CREATE TABLE tokudb_row_format_test_6 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA; +CREATE TABLE tokudb_row_format_test_7 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ; +CREATE TABLE tokudb_row_format_test_8 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY; + +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name like 'tokudb_row_format_test%' ORDER BY table_name; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_FAST; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SMALL; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ; +SELECT table_name, row_format, engine FROM 
information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +ALTER TABLE tokudb_row_format_test_1 ENGINE=TokuDB ROW_FORMAT=TOKUDB_DEFAULT; +SELECT table_name, row_format, engine FROM information_schema.tables WHERE table_name = 'tokudb_row_format_test_1'; + +DROP TABLE tokudb_row_format_test_1, tokudb_row_format_test_2, tokudb_row_format_test_3, tokudb_row_format_test_4, tokudb_row_format_test_5, tokudb_row_format_test_6, tokudb_row_format_test_7, tokudb_row_format_test_8; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result index 6ec3a2c807943..30e0bdbebd78b 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db938.result @@ -23,6 +23,7 @@ set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR d TRUNCATE TABLE t1; set global tokudb_debug_pause_background_job_manager = FALSE; set DEBUG_SYNC = 'now SIGNAL done'; +set DEBUG_SYNC = 'RESET'; drop table t1; set session tokudb_auto_analyze = @orig_auto_analyze; set session tokudb_analyze_in_background = @orig_in_background; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test index f1912faad0260..50434a79a0017 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test @@ -40,6 +40,7 @@ insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3); select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status; # lets flip to another connection +--source include/count_sessions.inc connect(conn1, localhost, root); # set up the DEBUG_SYNC point @@ -64,6 +65,7 @@ connection conn1; reap; connection 
default; disconnect conn1; +set DEBUG_SYNC = 'RESET'; drop table t1; set session tokudb_auto_analyze = @orig_auto_analyze; @@ -74,3 +76,4 @@ set session tokudb_analyze_time = @orig_time; set global tokudb_cardinality_scale_percent = @orig_scale_percent; set session default_storage_engine = @orig_default_storage_engine; set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager; +--source include/wait_until_count_sessions.inc diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test b/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test index be14d8814f04e..f97235a0a2d3a 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test +++ b/storage/tokudb/mysql-test/tokudb_parts/t/partition_debug_sync_tokudb.test @@ -56,7 +56,7 @@ partition by range (a) insert into t1 values (1), (11), (21), (33); SELECT * FROM t1; SHOW CREATE TABLE t1; ---replace_result #p# #P# #sp# #SP# +--source include/table_files_replace_pattern.inc --list_files $MYSQLD_DATADIR/test SET DEBUG_SYNC='before_open_in_get_all_tables SIGNAL parked WAIT_FOR open'; @@ -82,7 +82,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p0 INTO disconnect con1; connection default; --reap ---replace_result #p# #P# #sp# #SP# +--source include/table_files_replace_pattern.inc --list_files $MYSQLD_DATADIR/test SHOW CREATE TABLE t1; SELECT * FROM t1; diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index 84f1c873a26c2..e518561527936 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -66,6 +66,7 @@ uint read_status_frequency = 0; my_bool strip_frm_data = FALSE; char* tmp_dir = NULL; uint write_status_frequency = 0; +my_bool dir_per_db = FALSE; char* version = (char*) TOKUDB_VERSION_STR; // file system reserve as a percentage of total disk space @@ -394,6 +395,18 @@ static MYSQL_SYSVAR_UINT( ~0U, 0); +static void tokudb_dir_per_db_update(THD* thd, + struct 
st_mysql_sys_var* sys_var, + void* var, const void* save) { + my_bool *value = (my_bool *) var; + *value = *(const my_bool *) save; + db_env->set_dir_per_db(db_env, *value); +} + +static MYSQL_SYSVAR_BOOL(dir_per_db, dir_per_db, + 0, "TokuDB store ft files in db directories", + NULL, tokudb_dir_per_db_update, FALSE); + #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL static MYSQL_SYSVAR_STR( gdb_path, @@ -935,6 +948,7 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(tmp_dir), MYSQL_SYSVAR(version), MYSQL_SYSVAR(write_status_frequency), + MYSQL_SYSVAR(dir_per_db), #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL MYSQL_SYSVAR(gdb_path), diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h index 70784fdcae3b1..c446e21257075 100644 --- a/storage/tokudb/tokudb_sysvars.h +++ b/storage/tokudb/tokudb_sysvars.h @@ -81,6 +81,7 @@ extern uint read_status_frequency; extern my_bool strip_frm_data; extern char* tmp_dir; extern uint write_status_frequency; +extern my_bool dir_per_db; extern char* version; #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL From 82ab92bd66eaaf951d49082a5c142759da59b137 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 25 Oct 2016 22:35:35 +0000 Subject: [PATCH 31/44] MDEV-10951 Field_newdate::cmp access violation The crash is caused by macro uint3korr() accessing memory (1 byte) past the end of allocated page. The macro is written such it reads 4 bytes instead of 3 and discards the value of the last byte. However, it is not always guaranteed that all uint3korr accesses will be valid (i.e that the caller allocates an extra byte after the value). In particular, the tree in Item_func_group_concat does not account for any extra bytes that it would need for comparison of keys in some cases (Field_newdate::cmp, Field_medium::cmp) The fix change uint3korr so it does not access extra bytes. 
--- include/byte_order_generic_x86.h | 10 ---------- include/byte_order_generic_x86_64.h | 8 -------- 2 files changed, 18 deletions(-) diff --git a/include/byte_order_generic_x86.h b/include/byte_order_generic_x86.h index 0a71a17829b70..a97dd0f43a37f 100644 --- a/include/byte_order_generic_x86.h +++ b/include/byte_order_generic_x86.h @@ -27,19 +27,9 @@ ((uint32) (uchar) (A)[0]))) #define sint4korr(A) (*((const long *) (A))) #define uint2korr(A) (*((const uint16 *) (A))) - -/* - Attention: Please, note, uint3korr reads 4 bytes (not 3)! - It means, that you have to provide enough allocated space. -*/ -#if defined(HAVE_valgrind) && !defined(_WIN32) #define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\ (((uint32) ((uchar) (A)[1])) << 8) +\ (((uint32) ((uchar) (A)[2])) << 16)) -#else -#define uint3korr(A) (long) (*((const unsigned int *) (A)) & 0xFFFFFF) -#endif - #define uint4korr(A) (*((const uint32 *) (A))) #define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\ (((uint32) ((uchar) (A)[1])) << 8) +\ diff --git a/include/byte_order_generic_x86_64.h b/include/byte_order_generic_x86_64.h index b6b0c5d8ea582..8c7493965a996 100644 --- a/include/byte_order_generic_x86_64.h +++ b/include/byte_order_generic_x86_64.h @@ -27,17 +27,9 @@ ((uint32) (uchar) (A)[0]))) #define sint4korr(A) (int32) (*((int32 *) (A))) #define uint2korr(A) (uint16) (*((uint16 *) (A))) -/* - Attention: Please, note, uint3korr reads 4 bytes (not 3)! - It means, that you have to provide enough allocated space. 
-*/ -#if defined(HAVE_valgrind) && !defined(_WIN32) #define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\ (((uint32) ((uchar) (A)[1])) << 8) +\ (((uint32) ((uchar) (A)[2])) << 16)) -#else -#define uint3korr(A) (uint32) (*((unsigned int *) (A)) & 0xFFFFFF) -#endif #define uint4korr(A) (uint32) (*((uint32 *) (A))) #define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\ (((uint32) ((uchar) (A)[1])) << 8) +\ From ad5b88a892d3e78c7192f5eb77094b46c600ab94 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 26 Oct 2016 09:26:34 +0000 Subject: [PATCH 32/44] Fix build error in XtraDB on Windows. coming from Percona's workaround for glibc bug http://bugs.mysql.com/bug.php?id=82886 --- storage/xtradb/os/os0thread.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc index 9729268348c5a..af826027efc36 100644 --- a/storage/xtradb/os/os0thread.cc +++ b/storage/xtradb/os/os0thread.cc @@ -220,10 +220,19 @@ void os_thread_join( os_thread_t thread) { + /*This function is currently only used to workaround glibc bug + described in http://bugs.mysql.com/bug.php?id=82886 + + On Windows, no workarounds are necessary, all threads + are "detached" upon thread exit (handle is closed), so we do + nothing. + */ +#ifndef _WIN32 int ret MY_ATTRIBUTE((unused)) = pthread_join(thread, NULL); /* Waiting on already-quit threads is allowed */ ut_ad(ret == 0 || ret == ESRCH); +#endif } /*****************************************************************//** From 9155cc7090998a5b28a1f502466640b08242c6e8 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 31 Aug 2016 15:57:02 +1000 Subject: [PATCH 33/44] MDEV-10292: Tokudb - PerconaFT - compile error in recent gcc The following directives to ignore warnings where in the PerconaFT build in tokudb. These generate errors when g++ ... -o xxx.so is used to compile are shared object. As these don't actually hit any warnings they have been removed. 
* -Wno-ignored-attributes * -Wno-pointer-bool-conversion Signed-off-by: Daniel Black --- storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 2 -- 1 file changed, 2 deletions(-) diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake index a7292a89d872f..769bdffa5d99c 100644 --- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake +++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake @@ -98,9 +98,7 @@ set_cflags_if_supported( -Wno-error=address-of-array-temporary -Wno-error=tautological-constant-out-of-range-compare -Wno-error=maybe-uninitialized - -Wno-ignored-attributes -Wno-error=extern-c-compat - -Wno-pointer-bool-conversion -fno-rtti -fno-exceptions -Wno-error=nonnull-compare From a3c980b381ead0ea13df8314258c7a8d11fe5cd1 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Mon, 24 Oct 2016 15:26:11 +0400 Subject: [PATCH 34/44] MDEV-10824 - Crash in CREATE OR REPLACE TABLE t1 AS SELECT spfunc() Code flow hit incorrect branch while closing table instances before removal. This branch expects thread to hold open table instance, whereas CREATE OR REPLACE doesn't actually hold open table instance. Before CREATE OR REPLACE TABLE it was impossible to hit this condition in LTM_PRELOCKED mode, thus the problem didn't expose itself during DROP TABLE or DROP DATABASE. Fixed by adjusting condition to take into account LTM_PRELOCKED mode, which can be set during CREATE OR REPLACE TABLE. 
--- mysql-test/r/create_or_replace.result | 11 +++++++++++ mysql-test/t/create_or_replace.test | 12 ++++++++++++ sql/sql_table.cc | 3 ++- 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/create_or_replace.result b/mysql-test/r/create_or_replace.result index 3a894e9fcb140..a43dc2eaca447 100644 --- a/mysql-test/r/create_or_replace.result +++ b/mysql-test/r/create_or_replace.result @@ -442,3 +442,14 @@ KILL QUERY con_id; ERROR 70100: Query execution was interrupted drop table t1; DROP TABLE t2; +# +# MDEV-10824 - Crash in CREATE OR REPLACE TABLE t1 AS SELECT spfunc() +# +CREATE TABLE t1(a INT); +CREATE FUNCTION f1() RETURNS VARCHAR(16383) RETURN 'test'; +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +LOCK TABLE t1 WRITE; +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +UNLOCK TABLES; +DROP FUNCTION f1; +DROP TABLE t1; diff --git a/mysql-test/t/create_or_replace.test b/mysql-test/t/create_or_replace.test index 7bba2b341c043..b37417f39d0c8 100644 --- a/mysql-test/t/create_or_replace.test +++ b/mysql-test/t/create_or_replace.test @@ -386,3 +386,15 @@ drop table t1; # Cleanup # DROP TABLE t2; + +--echo # +--echo # MDEV-10824 - Crash in CREATE OR REPLACE TABLE t1 AS SELECT spfunc() +--echo # +CREATE TABLE t1(a INT); +CREATE FUNCTION f1() RETURNS VARCHAR(16383) RETURN 'test'; +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +LOCK TABLE t1 WRITE; +CREATE OR REPLACE TABLE t1 AS SELECT f1(); +UNLOCK TABLES; +DROP FUNCTION f1; +DROP TABLE t1; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 7cf31ee4fe832..050a3383612a6 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2464,7 +2464,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, if (table_type && table_type != view_pseudo_hton) ha_lock_engine(thd, table_type); - if (thd->locked_tables_mode) + if (thd->locked_tables_mode == LTM_LOCK_TABLES || + thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES) { if (wait_while_table_is_used(thd, table->table, HA_EXTRA_NOT_USED)) 
{ From 59a7bc35fc6526568e49f1087c022c5d01da088a Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 26 Oct 2016 14:09:11 +0400 Subject: [PATCH 35/44] Removed duplicate open_strategy assignments It is set in sql_yacc.yy. --- sql/sql_parse.cc | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index cbf723c1b493a..70511fcd849e3 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2857,12 +2857,6 @@ case SQLCOM_PREPARE: create_info.table_charset= 0; } - /* - For CREATE TABLE we should not open the table even if it exists. - If the table exists, we should either not create it or replace it - */ - lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB; - /* If we are a slave, we should add OR REPLACE if we don't have IF EXISTS. This will help a slave to recover from @@ -8225,12 +8219,6 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, if (check_fk_parent_table_access(thd, &lex->create_info, &lex->alter_info, create_table->db)) goto err; - /* - For CREATE TABLE we should not open the table even if it exists. - If the table exists, we should either not create it or replace it - */ - lex->query_tables->open_strategy= TABLE_LIST::OPEN_STUB; - error= FALSE; err: From 5569ac00590ba139bbc575c20de4c682919721e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 25 Oct 2016 15:08:15 +0300 Subject: [PATCH 36/44] MDEV-11126: Crash while altering persistent virtual column Problem was that if old virtual column is computed and stored there was no check if new column is really virtual column. 
--- mysql-test/r/alter_table.result | 55 +++++++++++++++++++++++++++++++++ mysql-test/t/alter_table.test | 25 +++++++++++++++ sql/sql_table.cc | 1 + 3 files changed, 81 insertions(+) diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index e572fdb197cc8..2e371ac6ae6e3 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -2021,3 +2021,58 @@ ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id); Warnings: Note 1061 Multiple primary key defined DROP TABLE t1; +# +# MDEV-11126 Crash while altering persistent virtual column +# +CREATE TABLE `tab1` ( +`id` bigint(20) NOT NULL AUTO_INCREMENT, +`field2` set('option1','option2','option3','option4') NOT NULL, +`field3` set('option1','option2','option3','option4','option5') NOT NULL, +`field4` set('option1','option2','option3','option4') NOT NULL, +`field5` varchar(32) NOT NULL, +`field6` varchar(32) NOT NULL, +`field7` varchar(32) NOT NULL, +`field8` varchar(32) NOT NULL, +`field9` int(11) NOT NULL DEFAULT '1', +`field10` varchar(16) NOT NULL, +`field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', +`v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, +PRIMARY KEY (`id`) +) DEFAULT CHARSET=latin1; +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128); +SHOW CREATE TABLE `tab1`; +Table Create Table +tab1 CREATE TABLE `tab1` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL 
DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT; +SHOW CREATE TABLE `tab1`; +Table Create Table +tab1 CREATE TABLE `tab1` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, + PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE `tab1`; diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test index 05d915ec478b5..d2b8a6082a623 100644 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@ -1712,3 +1712,28 @@ CREATE TABLE t1 ( ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id); DROP TABLE t1; +--echo # +--echo # MDEV-11126 Crash while altering persistent virtual column +--echo # + +CREATE TABLE `tab1` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` 
set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, + PRIMARY KEY (`id`) +) DEFAULT CHARSET=latin1; + +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128); +SHOW CREATE TABLE `tab1`; +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT; +SHOW CREATE TABLE `tab1`; +DROP TABLE `tab1`; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 050a3383612a6..5d4c551d7306f 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6274,6 +6274,7 @@ static bool fill_alter_inplace_info(THD *thd, (field->stored_in_db || field->vcol_info->is_in_partitioning_expr())) { if (is_equal == IS_EQUAL_NO || + !new_field->vcol_info || !field->vcol_info->is_equal(new_field->vcol_info)) ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL; else From 25932708b138aa89e5e9cea080e49d914f7bb724 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 26 Oct 2016 12:30:18 +0200 Subject: [PATCH 37/44] backport include/search_pattern_in_file.inc from 10.1 --- mysql-test/include/search_pattern_in_file.inc | 15 +++++++++------ mysql-test/r/lowercase_fs_on.result | 1 + mysql-test/r/named_pipe.result | 1 + mysql-test/r/view.result | 1 + 
mysql-test/r/wait_timeout_not_windows.result | 1 + .../innodb/r/innodb-change-buffer-recovery.result | 1 + mysql-test/suite/rpl/r/rpl_checksum.result | 1 + mysql-test/suite/rpl/r/rpl_gtid_errorlog.result | 2 ++ 8 files changed, 17 insertions(+), 6 deletions(-) diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index 0d09cdcd36efb..84237026ed06e 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -60,12 +60,12 @@ perl; use strict; - my $search_file= $ENV{'SEARCH_FILE'} or die "SEARCH_FILE not set"; - my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; - my $search_range= $ENV{'SEARCH_RANGE'}; + my $search_file= $ENV{'SEARCH_FILE'} or die "SEARCH_FILE not set"; + my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; + my $search_range= $ENV{'SEARCH_RANGE'}; my $file_content; $search_range= 50000 unless $search_range =~ /-?[0-9]+/; - open(FILE, "$search_file") or die("Unable to open '$search_file': $!\n"); + open(FILE, '<', $search_file) or die("Unable to open '$search_file': $!\n"); if ($search_range >= 0) { read(FILE, $file_content, $search_range, 0); } else { @@ -75,7 +75,10 @@ perl; read(FILE, $file_content, -$search_range, 0); } close(FILE); - if ( not $file_content =~ m{$search_pattern} ) { - die("# ERROR: The file '$search_file' does not contain the expected pattern $search_pattern\n->$file_content<-\n"); + $search_file =~ s{^.*?([^/\\]+)$}{$1}; + if ($file_content =~ m{$search_pattern}) { + print "FOUND /$search_pattern/ in $search_file\n" + } else { + print "NOT FOUND /$search_pattern/ in $search_file\n" } EOF diff --git a/mysql-test/r/lowercase_fs_on.result b/mysql-test/r/lowercase_fs_on.result index a090f46cfbf8b..b844b3f77dde4 100644 --- a/mysql-test/r/lowercase_fs_on.result +++ b/mysql-test/r/lowercase_fs_on.result @@ -1,3 +1,4 @@ # # Bug#20198490 : LOWER_CASE_TABLE_NAMES=0 ON WINDOWS LEADS TO 
PROBLEMS # +FOUND /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index ddd48f0ba9166..43fb44beece1d 100644 --- a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2154,3 +2154,4 @@ Privat (Private Nutzung) Mobilfunk Warnings: Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; +FOUND /\[ERROR\] Create named pipe failed/ in second-mysqld.err diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 52c379d03aff6..924b3a11fef1c 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -5432,6 +5432,7 @@ DROP FUNCTION f1; DROP VIEW v1; DROP TABLE t1, t2; create view v1 as select 1; +FOUND /mariadb-version/ in v1.frm drop view v1; # # MDEV-7260: Crash in get_best_combination when executing multi-table diff --git a/mysql-test/r/wait_timeout_not_windows.result b/mysql-test/r/wait_timeout_not_windows.result index df70aa9922160..867787a8ed340 100644 --- a/mysql-test/r/wait_timeout_not_windows.result +++ b/mysql-test/r/wait_timeout_not_windows.result @@ -1,3 +1,4 @@ set global log_warnings=2; set @@wait_timeout=1; +FOUND /Aborted.*Got timeout reading communication packets/ in mysqld.1.err set global log_warnings=@@log_warnings; diff --git a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result index cc2a0373444f8..07e13008e2718 100644 --- a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result +++ b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result @@ -33,6 +33,7 @@ INSERT INTO t1 VALUES(1,'X',1); SET DEBUG_DBUG='+d,crash_after_log_ibuf_upd_inplace'; SELECT b FROM t1 LIMIT 3; ERROR HY000: Lost connection to MySQL server during query +FOUND /Wrote log record for ibuf update in place operation/ in my_restart.err CHECK TABLE t1; Table Op Msg_type Msg_text 
test.t1 check status OK diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result index 94d215e596a92..9e37fbf40b18e 100644 --- a/mysql-test/suite/rpl/r/rpl_checksum.result +++ b/mysql-test/suite/rpl/r/rpl_checksum.result @@ -143,6 +143,7 @@ SET debug_dbug= @old_dbug; INSERT INTO t4 VALUES (2); include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occurred on the master. Message: error writing to the binary log' +FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err SELECT * FROM t4 ORDER BY a; a 1 diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result index 204615201d95d..e247ea9c2a714 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result @@ -38,5 +38,7 @@ a 3 4 5 +FOUND /Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error/ in mysqld.2.err +FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: , Internal MariaDB error code: 1590/ in mysqld.2.err DROP TABLE t1; include/rpl_end.inc From 22490a0d709d0c53da94799accb038bf270ed411 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 26 Oct 2016 13:26:43 +0200 Subject: [PATCH 38/44] MDEV-8345 STOP SLAVE should not cause an ERROR to be logged to the error log cherry-pick from 5.7: commit 6b24763 Author: Manish Kumar Date: Tue Mar 27 13:10:42 2012 +0530 BUG#12977988 - ON STOP SLAVE: ERROR READING PACKET FROM SERVER: LOST CONNECTION TO MYSQL SERVER BUG#11761457 - ERROR 2013 + "ERROR READING RELAY LOG EVENT" ON STOP SLAVEBUG#12977988 - ON STOP SLAVE: ERROR READING PACKET FROM SERVER: LOST CONNECTION TO MYSQL SERVER --- .../suite/rpl/r/rpl_stop_slave_error.result | 6 ++++++ .../suite/rpl/t/rpl_stop_slave_error-slave.opt | 1 + .../suite/rpl/t/rpl_stop_slave_error.test | 17 +++++++++++++++++ sql/slave.cc | 9 +++++++-- 4 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_stop_slave_error.result create mode 100644 mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt create mode 100644 mysql-test/suite/rpl/t/rpl_stop_slave_error.test diff --git a/mysql-test/suite/rpl/r/rpl_stop_slave_error.result b/mysql-test/suite/rpl/r/rpl_stop_slave_error.result new file mode 100644 index 0000000000000..2bd372a9a915b --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_stop_slave_error.result @@ -0,0 +1,6 @@ +include/master-slave.inc +[connection master] +include/stop_slave.inc +NOT FOUND /Error reading packet from server: Lost connection/ in slave_log.err +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt b/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt new file mode 100644 index 0000000000000..32c4527a91575 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error-slave.opt @@ -0,0 +1 @@ +--log-error=$MYSQLTEST_VARDIR/tmp/slave_log.err diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test 
b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test new file mode 100644 index 0000000000000..a88981c15c4fd --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test @@ -0,0 +1,17 @@ +# +# MDEV-8345 STOP SLAVE should not cause an ERROR to be logged to the error log +# +source include/have_binlog_format_mixed.inc; # don't repeat the test three times +source include/master-slave.inc; + +connection master; +sync_slave_with_master; +source include/stop_slave.inc; +let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/slave_log.err; +let SEARCH_PATTERN=Error reading packet from server: Lost connection; +let SEARCH_RANGE= -50000; +source include/search_pattern_in_file.inc; + +source include/start_slave.inc; +source include/rpl_end.inc; + diff --git a/sql/slave.cc b/sql/slave.cc index 6dc1a66a2ac90..a124ca6be7e34 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3120,8 +3120,13 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings) *suppress_warnings= TRUE; } else - sql_print_error("Error reading packet from server: %s ( server_errno=%d)", - mysql_error(mysql), mysql_errno(mysql)); + { + if (!mi->rli.abort_slave) + { + sql_print_error("Error reading packet from server: %s (server_errno=%d)", + mysql_error(mysql), mysql_errno(mysql)); + } + } DBUG_RETURN(packet_error); } From 26b87c332ff78a7aca04930ad86fbf7acc793222 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Thu, 27 Oct 2016 00:04:26 +0400 Subject: [PATCH 39/44] MDEV-10846 Running mysqldump backup twice returns error: Table 'mysql.proc' doesn't exist. The mysql_rm_db() doesn't seem to expect the 'mysql' database to be deleted. Checks for that added. Also fixed the bug MDEV-11105 Table named 'db' has weird side effect. The db.opt file now removed separately. 
--- mysql-test/r/drop.result | 6 ++++++ mysql-test/t/drop.test | 9 +++++++++ sql/sql_db.cc | 26 +++++++++++++++++++++----- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result index c23ffbe327b63..c25ae9e305517 100644 --- a/mysql-test/r/drop.result +++ b/mysql-test/r/drop.result @@ -209,3 +209,9 @@ INSERT INTO table1 VALUES (1); ERROR 42S02: Unknown table 't.notable' DROP TABLE table1,table2; # End BUG#34750 +# +# MDEV-11105 Table named 'db' has weird side effect. +# +CREATE DATABASE mysqltest; +CREATE TABLE mysqltest.db(id INT); +DROP DATABASE mysqltest; diff --git a/mysql-test/t/drop.test b/mysql-test/t/drop.test index d9784bc819a86..a3e96953bac02 100644 --- a/mysql-test/t/drop.test +++ b/mysql-test/t/drop.test @@ -313,3 +313,12 @@ INSERT INTO table1 VALUES (1); DROP TABLE table1,table2; --echo # End BUG#34750 + +--echo # +--echo # MDEV-11105 Table named 'db' has weird side effect. +--echo # + +CREATE DATABASE mysqltest; +CREATE TABLE mysqltest.db(id INT); +DROP DATABASE mysqltest; + diff --git a/sql/sql_db.cc b/sql/sql_db.cc index e89c3d9e74566..0a3ff64113f04 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -784,7 +784,7 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { ulong deleted_tables= 0; - bool error= true; + bool error= true, rm_mysql_schema; char path[FN_REFLEN + 16]; MY_DIR *dirp; uint length; @@ -809,6 +809,18 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) length= build_table_filename(path, sizeof(path) - 1, db, "", "", 0); strmov(path+length, MY_DB_OPT_FILE); // Append db option file name del_dbopt(path); // Remove dboption hash entry + /* + Now remove the db.opt file. + The 'find_db_tables_and_rm_known_files' doesn't remove this file + if there exists a table with the name 'db', so let's just do it + separately. We know this file exists and needs to be deleted anyway. 
+ */ + if (my_delete_with_symlink(path, MYF(0)) && my_errno != ENOENT) + { + my_error(EE_DELETE, MYF(0), path, my_errno); + DBUG_RETURN(true); + } + path[length]= '\0'; // Remove file name /* See if the directory exists */ @@ -835,7 +847,8 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) Disable drop of enabled log tables, must be done before name locking. This check is only needed if we are dropping the "mysql" database. */ - if ((my_strcasecmp(system_charset_info, MYSQL_SCHEMA_NAME.str, db) == 0)) + if ((rm_mysql_schema= + (my_strcasecmp(system_charset_info, MYSQL_SCHEMA_NAME.str, db) == 0))) { for (table= tables; table; table= table->next_local) if (check_if_log_table(table, TRUE, "DROP")) @@ -848,7 +861,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) lock_db_routines(thd, dbnorm)) goto exit; - if (!in_bootstrap) + if (!in_bootstrap && !rm_mysql_schema) { for (table= tables; table; table= table->next_local) { @@ -893,10 +906,13 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) ha_drop_database(path); tmp_disable_binlog(thd); query_cache_invalidate1(thd, dbnorm); - (void) sp_drop_db_routines(thd, dbnorm); /* @todo Do not ignore errors */ + if (!rm_mysql_schema) + { + (void) sp_drop_db_routines(thd, dbnorm); /* @todo Do not ignore errors */ #ifdef HAVE_EVENT_SCHEDULER - Events::drop_schema_events(thd, dbnorm); + Events::drop_schema_events(thd, dbnorm); #endif + } reenable_binlog(thd); /* From 9d4a0dde0ae3e0d46b4c5c0967c25862d467e94e Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 24 Oct 2016 10:15:11 -0700 Subject: [PATCH 40/44] Fixed bug mdev-11096. 1. When min/max value is provided the null flag for it must be set to 0 in the bitmap Culumn_statistics::column_stat_nulls. 2. When the calculation of the selectivity of the range condition over a column requires min and max values for the column then we have to check that these values are provided. 
--- mysql-test/r/selectivity.result | 24 ++++++++++++++++++++++ mysql-test/r/selectivity_innodb.result | 28 ++++++++++++++++++++++++-- mysql-test/t/selectivity.test | 19 +++++++++++++++++ sql/sql_statistics.cc | 15 ++++---------- sql/sql_statistics.h | 5 +++++ 5 files changed, 78 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/selectivity.result b/mysql-test/r/selectivity.result index 620bdc6bd50c2..c2364e11ceb36 100644 --- a/mysql-test/r/selectivity.result +++ b/mysql-test/r/selectivity.result @@ -1446,3 +1446,27 @@ a b i set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; DROP TABLE t1,t2; set use_stat_tables=@save_use_stat_tables; +# +# Bug mdev-11096: range condition over column without statistical data +# +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1 persistent for columns () indexes (); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +explain extended +select * from t1 where col1 > 'b' and col1 < 'e'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'e')) +select * from t1 where col1 > 'b' and col1 < 'e'; +col1 +c +d +drop table t1; +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; +set use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/r/selectivity_innodb.result b/mysql-test/r/selectivity_innodb.result index 0acbb465ba852..882f51515b2e8 100644 --- a/mysql-test/r/selectivity_innodb.result +++ b/mysql-test/r/selectivity_innodb.result @@ -802,9 +802,9 @@ insert into t2 values (2),(3); explain extended select * from t1 where a in ( 
select b from t2 ) AND ( a > 3 ); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 1 0.00 Using where +1 PRIMARY t1 ALL NULL NULL NULL NULL 1 100.00 Using where 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 -2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 0.00 +2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00 Warnings: Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where ((`test`.`t1`.`a` > 3)) select * from t1 where a in ( select b from t2 ) AND ( a > 3 ); @@ -1450,6 +1450,30 @@ a b i set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; DROP TABLE t1,t2; set use_stat_tables=@save_use_stat_tables; +# +# Bug mdev-11096: range condition over column without statistical data +# +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1 persistent for columns () indexes (); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +explain extended +select * from t1 where col1 > 'b' and col1 < 'e'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'e')) +select * from t1 where col1 > 'b' and col1 < 'e'; +col1 +c +d +drop table t1; +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; +set use_stat_tables=@save_use_stat_tables; set optimizer_switch=@save_optimizer_switch_for_selectivity_test; set @tmp_ust= @@use_stat_tables; set @tmp_oucs= @@optimizer_use_condition_selectivity; diff --git a/mysql-test/t/selectivity.test b/mysql-test/t/selectivity.test index c46ff69295fa7..1321046009e2d 
100644 --- a/mysql-test/t/selectivity.test +++ b/mysql-test/t/selectivity.test @@ -970,6 +970,25 @@ set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivit DROP TABLE t1,t2; +set use_stat_tables=@save_use_stat_tables; + +--echo # +--echo # Bug mdev-11096: range condition over column without statistical data +--echo # + +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1 persistent for columns () indexes (); + +explain extended +select * from t1 where col1 > 'b' and col1 < 'e'; +select * from t1 where col1 > 'b' and col1 < 'e'; + +drop table t1; + +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 47a5a40ebeb44..70080a6b4f162 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1003,11 +1003,13 @@ class Column_stat: public Stat_table switch (i) { case COLUMN_STAT_MIN_VALUE: + table_field->read_stats->min_value->set_notnull(); stat_field->val_str(&val); table_field->read_stats->min_value->store(val.ptr(), val.length(), &my_charset_bin); break; case COLUMN_STAT_MAX_VALUE: + table_field->read_stats->max_value->set_notnull(); stat_field->val_str(&val); table_field->read_stats->max_value->store(val.ptr(), val.length(), &my_charset_bin); @@ -3659,17 +3661,8 @@ double get_column_range_cardinality(Field *field, { double avg_frequency= col_stats->get_avg_frequency(); res= avg_frequency; - /* - psergey-todo: what does check for min_value, max_value mean? - min/max_value are set to NULL in alloc_statistics_for_table() and - alloc_statistics_for_table_share(). 
Both functions will immediately - call create_min_max_statistical_fields_for_table and - create_min_max_statistical_fields_for_table_share() respectively, - which will set min/max_value to be valid pointers, unless OOM - occurs. - */ if (avg_frequency > 1.0 + 0.000001 && - col_stats->min_value && col_stats->max_value) + col_stats->min_max_values_are_provided()) { Histogram *hist= &col_stats->histogram; if (hist->is_available()) @@ -3692,7 +3685,7 @@ double get_column_range_cardinality(Field *field, } else { - if (col_stats->min_value && col_stats->max_value) + if (col_stats->min_max_values_are_provided()) { double sel, min_mp_pos, max_mp_pos; diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index 46e5cef22d1ac..8e5f8107849a7 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -388,6 +388,11 @@ class Column_statistics avg_frequency= (ulong) (val * Scale_factor_avg_frequency); } + bool min_max_values_are_provided() + { + return !is_null(COLUMN_STAT_MIN_VALUE) && + !is_null(COLUMN_STAT_MIN_VALUE); + } }; From d451d772fdaa554eeb96ae12f96c3a32a6fd4d66 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 26 Oct 2016 10:59:38 -0700 Subject: [PATCH 41/44] Fixed bug mdev-9628. In the function create_key_parts_for_pseudo_indexes() the key part structures of pseudo-indexes created for BLOB fields were set incorrectly. Also the key parts for long fields must be 'truncated' up to the maximum length acceptable for key parts. 
--- mysql-test/r/selectivity.result | 47 ++++++++++++++ mysql-test/r/selectivity_innodb.result | 85 ++++++++++++++++++++++++++ mysql-test/t/selectivity.test | 33 ++++++++++ mysql-test/t/selectivity_innodb.test | 25 ++++++++ sql/opt_range.cc | 9 ++- 5 files changed, 198 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/selectivity.result b/mysql-test/r/selectivity.result index c2364e11ceb36..8fb5cd17c518a 100644 --- a/mysql-test/r/selectivity.result +++ b/mysql-test/r/selectivity.result @@ -1470,3 +1470,50 @@ d drop table t1; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; +# +# Bug mdev-9628: unindexed blob column without min-max statistics +# with optimizer_use_condition_selectivity=3 +# +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +create table t2(col1 text); +insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t2; +Table Op Msg_type Msg_text +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +select * from t1 where col1 > 'b' and col1 < 'd'; +col1 +c +explain extended +select * from t1 where col1 > 'b' and col1 < 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 8 28.57 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'd')) +select * from t2 where col1 > 'b' and col1 < 'd'; +col1 +c +explain extended +select * from t2 where col1 > 'b' and col1 < 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 
Using where +Warnings: +Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where ((`test`.`t2`.`col1` > 'b') and (`test`.`t2`.`col1` < 'd')) +select * from t2 where col1 < 'b' and col1 > 'd'; +col1 +explain extended +select * from t2 where col1 < 'b' and col1 > 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where 0 +drop table t1,t2; +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; +set use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/r/selectivity_innodb.result b/mysql-test/r/selectivity_innodb.result index 882f51515b2e8..3d15131dbb55e 100644 --- a/mysql-test/r/selectivity_innodb.result +++ b/mysql-test/r/selectivity_innodb.result @@ -1474,6 +1474,53 @@ d drop table t1; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; +# +# Bug mdev-9628: unindexed blob column without min-max statistics +# with optimizer_use_condition_selectivity=3 +# +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +create table t2(col1 text); +insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t2; +Table Op Msg_type Msg_text +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +select * from t1 where col1 > 'b' and col1 < 'd'; +col1 +c +explain extended +select * from t1 where col1 > 'b' and col1 < 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL 
NULL 8 28.57 Using where +Warnings: +Note 1003 select `test`.`t1`.`col1` AS `col1` from `test`.`t1` where ((`test`.`t1`.`col1` > 'b') and (`test`.`t1`.`col1` < 'd')) +select * from t2 where col1 > 'b' and col1 < 'd'; +col1 +c +explain extended +select * from t2 where col1 > 'b' and col1 < 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where +Warnings: +Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where ((`test`.`t2`.`col1` > 'b') and (`test`.`t2`.`col1` < 'd')) +select * from t2 where col1 < 'b' and col1 > 'd'; +col1 +explain extended +select * from t2 where col1 < 'b' and col1 > 'd'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +Warnings: +Note 1003 select `test`.`t2`.`col1` AS `col1` from `test`.`t2` where 0 +drop table t1,t2; +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; +set use_stat_tables=@save_use_stat_tables; set optimizer_switch=@save_optimizer_switch_for_selectivity_test; set @tmp_ust= @@use_stat_tables; set @tmp_oucs= @@optimizer_use_condition_selectivity; @@ -1560,6 +1607,44 @@ where t1.child_user_id=t3.id and t1.child_group_id is null and t2.lower_group_na parent_id child_group_id child_user_id id lower_group_name directory_id id drop table t1,t2,t3; # +# MDEV-9187: duplicate of bug mdev-9628 +# +set use_stat_tables = preferably; +set optimizer_use_condition_selectivity=3; +CREATE TABLE t1 (f1 char(32)) ENGINE=InnoDB; +INSERT INTO t1 VALUES ('foo'),('bar'),('qux'); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +SELECT * FROM t1 WHERE f1 < 'm'; +f1 +foo +bar +EXPLAIN EXTENDED +SELECT * FROM t1 WHERE f1 < 'm'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 
SIMPLE t1 ALL NULL NULL NULL NULL 3 72.09 Using where +Warnings: +Note 1003 select `test`.`t1`.`f1` AS `f1` from `test`.`t1` where (`test`.`t1`.`f1` < 'm') +CREATE TABLE t2 (f1 TEXT) ENGINE=InnoDB; +INSERT INTO t2 VALUES ('foo'),('bar'),('qux'); +ANALYZE TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t2 WHERE f1 <> 'qux'; +f1 +foo +bar +EXPLAIN EXTENDED +SELECT * FROM t2 WHERE f1 <> 'qux'; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 select `test`.`t2`.`f1` AS `f1` from `test`.`t2` where (`test`.`t2`.`f1` <> 'qux') +DROP TABLE t1,t2; +# # End of 10.0 tests # set use_stat_tables= @tmp_ust; diff --git a/mysql-test/t/selectivity.test b/mysql-test/t/selectivity.test index 1321046009e2d..8efc5216ba09c 100644 --- a/mysql-test/t/selectivity.test +++ b/mysql-test/t/selectivity.test @@ -992,3 +992,36 @@ drop table t1; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; +--echo # +--echo # Bug mdev-9628: unindexed blob column without min-max statistics +--echo # with optimizer_use_condition_selectivity=3 +--echo # + +set use_stat_tables='preferably'; +set optimizer_use_condition_selectivity=3; + +create table t1(col1 char(32)); +insert into t1 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t1; + +create table t2(col1 text); +insert into t2 values ('a'),('b'),('c'),('d'), ('e'),('f'),('g'),('h'); +analyze table t2; + +select * from t1 where col1 > 'b' and col1 < 'd'; +explain extended +select * from t1 where col1 > 'b' and col1 < 'd'; + +select * from t2 where col1 > 'b' and col1 < 'd'; +explain extended +select * from t2 where col1 > 'b' and col1 < 'd'; + +select * from t2 where col1 < 'b' and col1 > 'd'; +explain extended +select * from t2 where col1 < 'b' and col1 > 'd'; 
+ +drop table t1,t2; + +set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; +set use_stat_tables=@save_use_stat_tables; + diff --git a/mysql-test/t/selectivity_innodb.test b/mysql-test/t/selectivity_innodb.test index d6a77eac60004..25aa0abbc3b45 100644 --- a/mysql-test/t/selectivity_innodb.test +++ b/mysql-test/t/selectivity_innodb.test @@ -109,6 +109,31 @@ where t1.child_user_id=t3.id and t1.child_group_id is null and t2.lower_group_na drop table t1,t2,t3; +--echo # +--echo # MDEV-9187: duplicate of bug mdev-9628 +--echo # + +set use_stat_tables = preferably; +set optimizer_use_condition_selectivity=3; + +CREATE TABLE t1 (f1 char(32)) ENGINE=InnoDB; +INSERT INTO t1 VALUES ('foo'),('bar'),('qux'); +ANALYZE TABLE t1; + +SELECT * FROM t1 WHERE f1 < 'm'; +EXPLAIN EXTENDED +SELECT * FROM t1 WHERE f1 < 'm'; + +CREATE TABLE t2 (f1 TEXT) ENGINE=InnoDB; +INSERT INTO t2 VALUES ('foo'),('bar'),('qux'); +ANALYZE TABLE t2; + +SELECT * FROM t2 WHERE f1 <> 'qux'; +EXPLAIN EXTENDED +SELECT * FROM t2 WHERE f1 <> 'qux'; + +DROP TABLE t1,t2; + --echo # --echo # End of 10.0 tests --echo # diff --git a/sql/opt_range.cc b/sql/opt_range.cc index e0ca43e6d726f..5d6891a1edf53 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3345,9 +3345,16 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param, { Field *field= *field_ptr; uint16 store_length; + uint16 max_key_part_length= (uint16) table->file->max_key_part_length(); key_part->key= keys; key_part->part= 0; - key_part->length= (uint16) field->key_length(); + if (field->flags & BLOB_FLAG) + key_part->length= max_key_part_length; + else + { + key_part->length= (uint16) field->key_length(); + set_if_smaller(key_part->length, max_key_part_length); + } store_length= key_part->length; if (field->real_maybe_null()) store_length+= HA_KEY_NULL_LENGTH; From a0795655ab8d1cbcd88a155ba72ebf93864f82dc Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 27 Oct 2016 12:23:31 +0200 Subject: [PATCH 
42/44] MDEV-10846 Running mysqldump backup twice returns error: Table 'mysql.proc' doesn't exist. Update test results after 26b87c3 --- mysql-test/r/mysqldump.result | 3 --- 1 file changed, 3 deletions(-) diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index b6de51c8b03f2..cb3c28f42cdf2 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -5236,9 +5236,6 @@ SET @@global.log_output="TABLE"; SET @@global.general_log='OFF'; SET @@global.slow_query_log='OFF'; DROP DATABASE mysql; -Warnings: -Error 1146 Table 'mysql.proc' doesn't exist -Error 1146 Table 'mysql.event' doesn't exist SHOW CREATE TABLE mysql.general_log; Table Create Table general_log CREATE TABLE `general_log` ( From eca8c324e9a02f530853580991b11b587f54b24a Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 27 Oct 2016 19:07:55 +0200 Subject: [PATCH 43/44] Typo fixed. --- sql/item_subselect.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 5cdfa427997bc..e70922bb5d3ca 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -2620,8 +2620,8 @@ static bool check_equality_for_exist2in(Item_func *func, args[0]->all_used_tables() == OUTER_REF_TABLE_BIT) { /* It is Item_field or Item_direct_view_ref) */ - DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM || - args[0]->type() == Item::REF_ITEM); + DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM || + args[1]->type() == Item::REF_ITEM); *local_field= (Item_ident *)args[1]; *outer_exp= args[0]; return TRUE; From 7196691b44b65e12cb5cca6f17c8d0f091eb443f Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Tue, 1 Nov 2016 17:20:12 -0400 Subject: [PATCH 44/44] Fix/disable some failing galera tests. 
--- mysql-test/suite/galera/disabled.def | 4 +++- .../include/auto_increment_offset_restore.inc | 6 ++++++ .../include/auto_increment_offset_save.inc | 8 ++++++++ .../galera/r/galera_sst_xtrabackup-v2.result | 3 ++- mysql-test/suite/galera/t/galera#414.test | 1 + .../galera/t/galera_sst_xtrabackup-v2.test | 10 ++++++++++ .../suite/galera/t/galera_wan_restart_ist.test | 17 +++++++++++++++-- mysql-test/suite/galera_3nodes/disabled.def | 2 ++ 8 files changed, 47 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 4aa15d27661f6..771053778d3aa 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -28,4 +28,6 @@ galera_flush : mysql-wsrep/issues/229 galera_transaction_read_only : mysql-wsrep/issues/229 galera_gcs_fragment : Incorrect arguments to SET galera_flush_local : Fails sporadically -galera_binlog_stmt_autoinc : TODO: investigate \ No newline at end of file +galera_binlog_stmt_autoinc : TODO: investigate +galera_sst_xtrabackup-v2-options : TODO: Fix test case +mysql-wsrep#33 : TODO: investigate diff --git a/mysql-test/suite/galera/include/auto_increment_offset_restore.inc b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc index 6218dfd6f2c56..1248ed100ca4f 100644 --- a/mysql-test/suite/galera/include/auto_increment_offset_restore.inc +++ b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc @@ -32,4 +32,10 @@ if ($node_3) --connection $node_3 --eval SET @@global.auto_increment_offset = $auto_increment_offset_node_3; } + +if ($node_4) +{ +--connection $node_4 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_4; +} --enable_query_log diff --git a/mysql-test/suite/galera/include/auto_increment_offset_save.inc b/mysql-test/suite/galera/include/auto_increment_offset_save.inc index 3c4db3f381cd1..216c689ec8cc7 100644 --- a/mysql-test/suite/galera/include/auto_increment_offset_save.inc +++ 
b/mysql-test/suite/galera/include/auto_increment_offset_save.inc @@ -13,6 +13,8 @@ # Connection handle for 2nd node # $node_3 (optional) # Connection handle for 3rd node +# $node_4 (optional) +# Connection handle for 4th node if (!$node_1) { @@ -35,3 +37,9 @@ if ($node_3) let $auto_increment_offset_node_3 = `SELECT @@global.auto_increment_offset`; } +if ($node_4) +{ + --connection $node_4 + let $auto_increment_offset_node_4 = `SELECT @@global.auto_increment_offset`; +} + diff --git a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result index 750d73b615f55..df2d9190a4b56 100644 --- a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result +++ b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result @@ -277,7 +277,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; -SET GLOBAL debug = 'd,sync.alter_opened_table'; +SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ALTER TABLE t1 ADD COLUMN f2 INTEGER; SET wsrep_sync_wait = 0; Killing server ... @@ -356,3 +356,4 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; +SET GLOBAL debug_dbug = $debug_orig; diff --git a/mysql-test/suite/galera/t/galera#414.test b/mysql-test/suite/galera/t/galera#414.test index b426e6510b645..0ee6dcac700bd 100644 --- a/mysql-test/suite/galera/t/galera#414.test +++ b/mysql-test/suite/galera/t/galera#414.test @@ -3,6 +3,7 @@ # --source include/big_test.inc +--source include/have_innodb.inc --source include/galera_cluster.inc # We perform the shutdown/restart sequence in here. 
If there was a crash during shutdown, MTR will detect it diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test index c6823795e59d0..aac6822170a86 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test @@ -2,8 +2,18 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +# Save original auto_increment_offset values. +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + --source suite/galera/include/galera_st_shutdown_slave.inc --source suite/galera/include/galera_st_clean_slave.inc --source suite/galera/include/galera_st_kill_slave.inc --source suite/galera/include/galera_st_kill_slave_ddl.inc + +# Restore original auto_increment_offset values. +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc diff --git a/mysql-test/suite/galera/t/galera_wan_restart_ist.test b/mysql-test/suite/galera/t/galera_wan_restart_ist.test index 42f63df3acc63..1cf5d4c7f74ee 100644 --- a/mysql-test/suite/galera/t/galera_wan_restart_ist.test +++ b/mysql-test/suite/galera/t/galera_wan_restart_ist.test @@ -12,6 +12,16 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 +--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4 + +# Save original auto_increment_offset values. 
+--let $node_1=node_1 +--let $node_2=node_2 +--let $node_3=node_3 +--let $node_4=node_4 +--source include/auto_increment_offset_save.inc + SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; --connection node_1 @@ -21,11 +31,9 @@ INSERT INTO t1 VALUES (1); --connection node_2 INSERT INTO t1 VALUES (2); ---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 --connection node_3 INSERT INTO t1 VALUES (3); ---connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4 --connection node_4 INSERT INTO t1 VALUES (4); @@ -146,3 +154,8 @@ CALL mtr.add_suppression("Action message in non-primary configuration from membe --connection node_4 CALL mtr.add_suppression("Action message in non-primary configuration from member 0"); + +# Restore original auto_increment_offset values. +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def index fb23a81bfb8b8..ca55c41ff72f4 100644 --- a/mysql-test/suite/galera_3nodes/disabled.def +++ b/mysql-test/suite/galera_3nodes/disabled.def @@ -3,3 +3,5 @@ galera_evs_suspect_timeout : TODO: investigate galera_innobackupex_backup : TODO: investigate galera_slave_options_do :MDEV-8798 galera_slave_options_ignore : MDEV-8798 +galera_pc_bootstrap : TODO: Investigate: Timeout in wait_condition.inc +galera_pc_weight : Test times out