/***************************************************
** Title: Sync Config Settings between Servers **
** Author: Eric Zierdt **
** Date: 03/10/2011 **
***************************************************/
/**************************************
** STEP 1: Turn on Advanced Options **
**************************************/
-- Enable 'show advanced options' so advanced sp_configure settings are visible/settable.
-- Explicit EXEC: a bare procedure call only works as the first statement of a batch.
EXEC sp_configure 'show advanced options', 1
GO
RECONFIGURE
GO
/*******************************************
** STEP 2: Populate the values from Prod **
*******************************************/
-- SELECT * INTO #TMP_PROD_Configs FROM ProductionServer.msdb.sys.configurations C
/*****************************************
** STEP 3: Run Cursor and copy results **
*****************************************/
/* Emit one sp_configure/RECONFIGURE batch per setting whose local value differs
   from the production snapshot in #TMP_PROD_Configs.  Output is PRINTed for
   review and manual execution (STEP 4), not executed directly. */
DECLARE @SQL VARCHAR(MAX)
       ,@name VARCHAR(300)
       ,@value SQL_VARIANT

-- INNER JOIN, not LEFT JOIN: a production row is required to supply the target
-- value.  The old LEFT JOIN behaved as an inner join anyway, because the
-- "C.value <> TOC.value" predicate is never true when TOC.value is NULL --
-- and an unmatched row would have produced a NULL @name, making the whole
-- concatenated @SQL NULL and PRINTing nothing.
DECLARE SYNC_CURSOR CURSOR LOCAL FAST_FORWARD FOR
SELECT TOC.name, TOC.value
FROM sys.configurations C
JOIN #TMP_PROD_Configs TOC ON C.configuration_id = TOC.configuration_id
WHERE C.value <> TOC.value
  AND C.configuration_id NOT IN (1543,1544)  -- skip server-specific settings

OPEN SYNC_CURSOR
FETCH NEXT FROM SYNC_CURSOR INTO @name, @value

WHILE @@FETCH_STATUS = 0
BEGIN
    -- EXEC added so each generated batch is valid even when sp_configure is
    -- not the first statement after paste.
    SET @SQL = '
EXEC sp_configure ''' + @name + ''', ' + CAST(@value AS VARCHAR(25)) + '
GO
RECONFIGURE
GO
'
    PRINT @SQL
    FETCH NEXT FROM SYNC_CURSOR INTO @name, @value
END

CLOSE SYNC_CURSOR
DEALLOCATE SYNC_CURSOR
/*************************************************
** STEP 4: Paste results below, review and run **
*************************************************/
/**********************************************
** STEP 5: Be good and drop your temp table **
**********************************************/
-- Drop only if it exists (safe to re-run after a partial run or fresh session).
IF OBJECT_ID('tempdb..#TMP_PROD_Configs') IS NOT NULL
    DROP TABLE #TMP_PROD_Configs
/***************************************
** STEP 6: Turn off Advanced Options **
***************************************/
-- Turn 'show advanced options' back off now that syncing is done.
-- Explicit EXEC: a bare procedure call only works as the first statement of a batch.
EXEC sp_configure 'show advanced options', 0
GO
RECONFIGURE
GO
/***************************************
** Query to compare values if needed **
***************************************/
-- Side-by-side comparison of local vs. production configuration values.
-- INNER JOIN replaces the old LEFT JOIN: the "<>" predicate already discarded
-- unmatched (NULL) rows, so the LEFT JOIN was misleading.  Columns are aliased
-- so the two sides are distinguishable in the result grid.
SELECT  C.configuration_id   AS Local_ConfigId
       ,C.name               AS Local_Name
       ,C.value              AS Local_Value
       ,C.value_in_use       AS Local_ValueInUse
       ,C.description        AS Local_Description
       ,TOC.configuration_id AS Prod_ConfigId
       ,TOC.name             AS Prod_Name
       ,TOC.value            AS Prod_Value
       ,TOC.value_in_use     AS Prod_ValueInUse
       ,TOC.description      AS Prod_Description
FROM sys.configurations C
JOIN #TMP_PROD_Configs TOC ON C.configuration_id = TOC.configuration_id
WHERE C.value <> TOC.value
  AND C.configuration_id NOT IN (1543,1544)  -- skip server-specific settings
Tuesday, May 10, 2011
Sync Config Settings between Servers
I'm setting up a new Staging Environment and am trying to find ways we can sync Staging with our production environment. My current task is to Sync the configurations and settings. Here is the script I came up with for the Config Settings. Hope it's of use.
Thursday, April 21, 2011
Getting Object Counts
Recently I was reporting a bug I noticed with a 3rd party tool, and the support group asked me to send them "the number of instances, number of databases per instance, total number of tables and total number of indexes." Now I knew the number of instances I was monitoring, and I knew the approximate number of tables (perhaps a good DBA should know this number, not sure...do other DBAs out there with larger servers know the exact number? comment below, I'm interested), but I had no idea how many indexes. So I wrote a little script to give me this number and, as I like to do, I'm making it available to the community. Hope this helps you...if you find use of it (even years in the future) post below.
/***********************************
** Author: Eric Zierdt **
** Date: 4/21/2011 **
** Email: eric.zierdt@gmail.com **
***********************************/
-- Accumulator for the three headline counts (databases, tables, indexes).
DECLARE @CountAll AS TABLE (CntName VARCHAR(100), Cnt INT)
/*******************************************
** GET COUNT OF ALL NON-SYSTEM DATABASES **
*******************************************/
-- BUG FIX: owner_sid is VARBINARY.  Comparing it to the *string* '0x01'
-- compares against the bytes of the text "0x01", not the sid 0x01, so
-- sa-owned system databases were never actually excluded.  Use the binary
-- literal.  (NOTE(review): the loops below exclude system DBs by name
-- instead; the two filters can disagree for sa-owned user databases.)
INSERT INTO @CountAll
SELECT 'TotalDBs', COUNT(1) AS TotalDBs
FROM sys.databases D
WHERE owner_sid <> 0x01
/******************************
** GET COUNT OF ALL TABLES **
** FOR NON-SYSTEM DATABASES **
******************************/
DECLARE @CountTable AS TABLE (DB VARCHAR(100), CNT INT)
DECLARE @SQL VARCHAR(MAX)
DECLARE @DBName VARCHAR(128)
SET @DBName = ''
-- Walk user databases in name order; the subquery yields NULL (ending the
-- loop) once no name sorts after the current one.  state = 0 restricts to
-- ONLINE databases -- a USE against an offline/restoring DB would abort the run.
WHILE @DBName < (
    SELECT MIN(name)
    FROM sys.databases
    WHERE name > @DBName
      AND name NOT IN ('master','model','msdb','tempdb')
      AND state = 0
)
BEGIN
    SELECT @DBName = MIN(name)
    FROM sys.databases
    WHERE name > @DBName
      AND name NOT IN ('master','model','msdb','tempdb')
      AND state = 0
    -- QUOTENAME/REPLACE guard against database names containing ] or quotes.
    SET @SQL = '
    USE ' + QUOTENAME(@DBName) + '
    SELECT ''' + REPLACE(@DBName, '''', '''''') + ''' AS DBName
    ,COUNT(1) AS Cnt
    FROM sys.tables
    '
    INSERT INTO @CountTable
    EXEC (@SQL)
END
INSERT INTO @CountAll
SELECT 'TotalTables', SUM(CNT) AS TotalTables
FROM @CountTable
/*******************************
** GET COUNT OF ALL INDEXES **
** FOR NON-SYSTEM DATABASES **
*******************************/
DECLARE @IndexTable AS TABLE (DB VARCHAR(100), TableName VARCHAR(100), CNT INT)
SET @DBName = ''
-- Same walk pattern as the table-count loop above; state = 0 restricts to
-- ONLINE databases so the dynamic USE cannot fail mid-run.
WHILE @DBName < (
    SELECT MIN(name)
    FROM sys.databases
    WHERE name > @DBName
      AND name NOT IN ('master','model','msdb','tempdb')
      AND state = 0
)
BEGIN
    SELECT @DBName = MIN(name)
    FROM sys.databases
    WHERE name > @DBName
      AND name NOT IN ('master','model','msdb','tempdb')
      AND state = 0
    -- QUOTENAME/REPLACE guard against database names containing ] or quotes.
    SET @SQL = '
    USE ' + QUOTENAME(@DBName) + '
    SELECT ''' + REPLACE(@DBName, '''', '''''') + ''' AS DBName
    ,OBJECT_NAME(I.object_id) AS TableName
    ,COUNT(1) AS Cnt
    FROM sys.indexes I
    JOIN sys.tables T ON I.object_id = T.object_id
    WHERE I.name IS NOT NULL   -- named indexes only; excludes heap rows
    GROUP BY OBJECT_NAME(I.object_id)
    '
    --PRINT @SQL
    INSERT INTO @IndexTable
    EXEC (@SQL)
END
INSERT INTO @CountAll
SELECT 'TotalIndexes', SUM(CNT) AS TotalIndexes
FROM @IndexTable
-- Final report: one row each for TotalDBs, TotalTables, TotalIndexes.
SELECT *
FROM @CountAll
Thursday, March 31, 2011
GUIDS mess up defragmenting
I'm sure most of you know already that the use of GUIDS or UniqueIdentifiers is messy for doing defragmenting; because they are not sequential, they tend to cause problems for indexes and while you can decrease your FillFactor, I found that I'd rather not defrag an index only to find it at 99% fragmented again within a few hours.
I wrote a little script to help me identify the indexes that have an included column which is of type UniqueIdentifier.
I wrote a little script to help me identify the indexes that have an included column which is of type UniqueIdentifier.
-- CHANGE THE DB TO THE ONE YOU WANT TO SEE INDEXES WITH GUIDS IN THEM
-- Lists every index carrying a uniqueidentifier column (key or included):
-- GUIDs insert in random order, so these indexes fragment quickly.
;WITH LIST AS (
    SELECT  P.object_id
           ,OBJECT_NAME(P.object_id) AS TableName
           ,I.name AS IndexName
           ,C.name AS ColName
           ,T.name AS DataType
    FROM sys.partitions P
    -- sys.tables replaces the deprecated sysobjects join: the old
    -- "object_id(O.name)" lookup resolved names without a schema and broke
    -- for tables outside the caller's default schema.  sys.tables also
    -- already restricts to user tables (the old O.xtype = 'U' filter).
    JOIN sys.tables TBL ON TBL.object_id = P.object_id
    JOIN sys.indexes I ON I.object_id = P.object_id AND I.index_id = P.index_id
    JOIN sys.index_columns IC ON P.object_id = IC.object_id AND P.index_id = IC.index_id
    JOIN sys.columns C ON P.object_id = C.object_id AND IC.column_id = C.column_id
    -- user_type_id (not system_type_id) gives a unique match and avoids
    -- duplicate rows when alias types share a system type.
    JOIN sys.types T ON C.user_type_id = T.user_type_id
    WHERE T.name = 'uniqueidentifier'
      AND P.partition_number = 1   -- one row per index, not one per partition
)
SELECT DISTINCT *
FROM LIST
ORDER BY TableName, ColName
Monday, March 14, 2011
Titles
I'm sure we've all seen them before, the silly title that some people get that sound so ridiculous, like at my last company the front desk person was the Director of First Impressions. I recently saw that one of our business people was on a YouTube video showing off our new product and her title was "Brand Ambassador". For some reason this struck me as very silly, so now we have brand managers and brand ambassadors...why do people find it so necessary to find these odd title for themselves. So I've come up with a list of possible titles for myself, I'll see if I can get my manager to allow me to use one of them. I've come up with a good list I think, but feel free to post a comment with some other good ones a DBA or BI Developer might consider.
Director of Fluff
Director of Shock and Awe
"Behind the Scenes Magic" Technician
Data Sorcerer
Data Mage
Data Defender
High Priest of the Data arts
His Royal Majesty
Knight of the Data Table
BI Crew Member
Developer of the BI Arts
The Keymaster
Data Doctor
Report Guru
Cube Guardian
Data Ambassador
Keeper of all things BI
Data Librarian
The Undersecretary of Data
The Sultan of SQL
The Duke of Data
Director of Fluff
Director of Shock and Awe
"Behind the Scenes Magic" Technician
Data Sorcerer
Data Mage
Data Defender
High Priest of the Data arts
His Royal Majesty
Knight of the Data Table
BI Crew Member
Developer of the BI Arts
The Keymaster
Data Doctor
Report Guru
Cube Guardian
Data Ambassador
Keeper of all things BI
Data Librarian
The Undersecretary of Data
The Sultan of SQL
The Duke of Data
Thursday, January 6, 2011
Second to last Friday in Month
I was tasked with creating a SQL Agent Job that would run on the 2nd to last Friday every month; but how to do that? The 3rd Friday of every month isn't necessarily the 2nd to last Friday...The Job Scheduler has an option for "LAST" x of the month, but not 2nd to last, so after some thought I decided to write a function that would do it for me. Now, I'm happy to see any comments from people on a better way of doing this, but this is what I came up with, hope it helps you, or spawns some ideas for you.
Before you can implement the main function, you'll need to have a function that returns the first day of a given month, and the last day of a given month....Ok, you probably don't need them as functions, but I like them that way...
And now for the main attraction:
For some reason I can't figure out, there seems to be a /day tag at the end of the script...it's not in my code, it appears to be in the exact spot as the code stating the end of my SQL...not sure, ignore it.
Before you can implement the main function, you'll need to have a function that returns the first day of a given month, and the last day of a given month....Ok, you probably don't need them as functions, but I like them that way...
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- =====================================================================
-- Author: Eric Zierdt: ericzierdt@gmail.com
-- Create date: 01/12/2010
-- Description: Returns the Last day of the month
-- Usage: SELECT dbo.LastOfMonth(GetDate())
-- =====================================================================
CREATE FUNCTION [dbo].[LastOfMonth]
(
    @DTC DATE -- any date inside the month of interest
)
RETURNS DATE
AS
BEGIN
    -- Jump to the first day of the *next* month, then step back one day.
    RETURN CAST(DATEADD(DAY, -1, DATEADD(MONTH, DATEDIFF(MONTH, 0, @DTC) + 1, 0)) AS DATE)
END
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- =====================================================================
-- Author: Eric Zierdt: ericzierdt@gmail.com
-- Create date: 01/12/2010
-- Description: Returns the first day of the month
-- Usage: SELECT dbo.FirstOfMonth(GetDate())
-- =====================================================================
CREATE FUNCTION [dbo].[FirstOfMonth]
(
    @DTC DATE -- any date inside the month of interest
)
RETURNS DATE
AS
BEGIN
    -- Whole months elapsed since day 0, re-added to day 0, lands on the
    -- first of @DTC's month.  (The original's DATEADD(ms, 0, ...) wrapper
    -- was a no-op and is omitted.)
    RETURN CAST(DATEADD(MONTH, DATEDIFF(MONTH, 0, @DTC), 0) AS DATE)
END
And now for the main attraction:
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- =====================================================================
-- Author: Eric Zierdt: ericzierdt@gmail.com
-- Create date: 01/06/2011
-- Description: Returns the Nth lastof the given month
-- Usage: SELECT dbo.NthLastDoWOfMonth(GetDate(),6,1)
-- =====================================================================
CREATE FUNCTION [dbo].[NthLastDoWOfMonth]
(
    -- Add the parameters for the function here
    @MonthToCheck DATE  -- any date inside the month of interest
    ,@DayOfWeek INT     -- 1=Sunday ... 7=Saturday (fixed mapping, see below)
    ,@WeeksFromLast INT -- 0=Last @DayOfWeek of the month, 1=2nd to last @DayOfWeek of the month, etc.
)
RETURNS DATE
AS
BEGIN
    DECLARE @DaysInMonth TABLE (DayOfMonth DATE, DayOfWeek INT)
    DECLARE @FirstOfMonth DATE
           ,@LastOfMonth DATE
           ,@ThisDay DATE
           ,@Result DATE

    SELECT @FirstOfMonth = dbo.FirstOfMonth(@MonthToCheck)
          ,@LastOfMonth  = dbo.LastOfMonth(@MonthToCheck)

    -- Materialize every day of the month with its weekday.
    -- BUG FIX: DATEPART(dw, ...) shifts with the server's @@DATEFIRST setting,
    -- so the old code returned different days on servers with a non-default
    -- DATEFIRST.  DATEDIFF from day 0 (1900-01-01, a Monday) is
    -- setting-independent; the expression maps it to 1=Sunday..7=Saturday.
    SET @ThisDay = @FirstOfMonth
    WHILE @ThisDay <= @LastOfMonth
    BEGIN
        INSERT INTO @DaysInMonth
        SELECT @ThisDay, ((DATEDIFF(DAY, 0, @ThisDay) + 1) % 7) + 1
        SET @ThisDay = DATEADD(DAY, 1, @ThisDay)
    END

    -- Number the matching weekdays in date order, then count back
    -- @WeeksFromLast positions from the highest row number.
    ;WITH DaysCTE AS (
        SELECT DayOfMonth
              ,ROW_NUMBER() OVER(ORDER BY DayOfMonth) AS RowNumber
        FROM @DaysInMonth DIM
        WHERE DayOfWeek = @DayOfWeek
    )
    , MaxRow AS (
        SELECT MAX(RowNumber) AS MaxRowNbr
        FROM DaysCTE
    )
    SELECT @Result = DaysCTE.DayOfMonth
    FROM DaysCTE
    CROSS JOIN MaxRow
    WHERE DaysCTE.RowNumber = MaxRow.MaxRowNbr - @WeeksFromLast

    -- NULL when @WeeksFromLast exceeds the number of matching weekdays.
    RETURN @Result
END
For some reason I can't figure out, there seems to be a /day tag at the end of the script...it's not in my code, it appears to be in the exact spot as the code stating the end of my SQL...not sure, ignore it.
Tuesday, November 2, 2010
SQLSaturday #58 Presentation
Last Friday I had the opportunity to present a track on Table and index partitioning at SQLSaturday #58 (yes, SQLSaturday on a Friday) in Minnesota.
I had a great time, met a lot of great people and found the entire experience very enjoyable; I want to present more.
I said I'd put my queries up on my blog; so here it is.
Script 1, which was here, shows how to create a very basic partition, add values to it, and move it (create a folder on your C: drive called PartitionDB prior to starting).
Note that the USE Master at the bottom is so that in the next script (which I had in different files/tabs) when you drop the DB, you don't have an error about someone being connected.
Script 2 was used to demonstrate large volume of data being merged and split and how quickly the data goes from one table to another when you use the ALTER TABLE - SWITCH. When I gave the presentation, I had a subfolder created previously that had a Database with 8 Million rows already created. This won't work over the blog, so you'll need to create the table and dump in the 8M rows yourself....So I'm taking out the part about dropping the table and attaching the other database.
Script 3 is a real life example of how my company uses Partitioning. It uses a date based range; and shows the process we use to move and drop the oldest data:
The final script was a script to show how many records are in each filegroup/range; I find it is very useful for me for many different situations.
I had a great time, met a lot of great people and found the entire experience very enjoyable; I want to present more.
I said I'd put my queries up on my blog; so here it is.
Script 1, which was here, shows how to create a very basic partition, add values to it, and move it (create a folder on your C: drive called PartitionDB prior to starting).
/**************************************
** Database Partitioning Script **
** Eric Zierdt, W3i **
** http://ericemployed.blogspot.com **
** http://blog.w3i.com **
**************************************/
-- Create the demo database under C:\PartitionDB with one PRIMARY file,
-- five numbered filegroups (FG1-FG5), a catch-all FGDefault, and a log file.
USE master
GO
CREATE DATABASE PartitionDB ON
PRIMARY(NAME = db_data, FILENAME = 'c:\PartitionDB\db.mdf' ,SIZE = 25MB)
, FILEGROUP FG1( NAME = FG1_data, FILENAME = 'c:\PartitionDB\FG1.ndf' ,SIZE = 25MB)
, FILEGROUP FG2( NAME = FG2_data, FILENAME = 'c:\PartitionDB\FG2.ndf', SIZE = 25MB)
, FILEGROUP FG3( NAME = FG3_data, FILENAME = 'c:\PartitionDB\FG3.ndf', SIZE = 25MB)
, FILEGROUP FG4( NAME = FG4_data, FILENAME = 'c:\PartitionDB\FG4.ndf', SIZE = 25MB)
, FILEGROUP FG5( NAME = FG5_data, FILENAME = 'c:\PartitionDB\FG5.ndf', SIZE = 25MB)
, FILEGROUP FGDefault( NAME = FGDefault_data, FILENAME = 'c:\PartitionDB\FGDefault.ndf', SIZE = 25MB)
LOG ON( NAME = db_log, FILENAME = 'c:\PartitionDB\log.ldf', SIZE = 1MB,FILEGROWTH = 100MB ) ;
GO
USE PartitionDB
GO
-- RANGE RIGHT: each boundary value belongs to the partition on its right,
-- so the 5 boundaries define 6 partitions: (-inf,1000), [1000,2000), ... [5000,+inf).
CREATE PARTITION FUNCTION EZ_PartitionFunction (INT) AS
RANGE RIGHT FOR VALUES (1000, 2000, 3000, 4000, 5000);
-- Map the 6 partitions to filegroups, in partition order (FGDefault takes the
-- leftmost, open-ended partition).
CREATE PARTITION SCHEME EZ_PartitionScheme AS
PARTITION EZ_PartitionFunction TO([FGDefault],[FG1], [FG2], [FG3], [FG4], [FG5])
GO
-- Partitioned table: each row is routed to a filegroup by its ID value.
CREATE TABLE EZ_PartionedTable
( ID int PRIMARY KEY,
Name VARCHAR(50)
)
ON EZ_PartitionScheme(ID)
-- One row per range, plus several past the last boundary (they all land in
-- the final [5000,+inf) partition).
INSERT INTO EZ_PartionedTable (ID,NAME)
VALUES (1,'Test')
,(1001,'Test')
,(2001,'Test')
,(3001,'Test')
,(4001,'Test')
,(5001,'Test')
,(6001,'Test')
,(7001,'Test')
,(8001,'Test')
USE [master]
GO
-- Add a new filegroup + file to receive a future partition.
ALTER DATABASE [PartitionDB] ADD FILEGROUP [FG6]
GO
ALTER DATABASE [PartitionDB] ADD FILE ( NAME = N'FG6_data', FILENAME = N'C:\PartitionDB\FG6.ndf' , SIZE = 25MB , FILEGROWTH = 5MB )
TO FILEGROUP [FG6]
GO
USE PartitionDB
GO
-- Adding a new boundary: mark FG6 as NEXT USED, then SPLIT at 6000.
ALTER PARTITION SCHEME EZ_PartitionScheme NEXT USED [FG6]
ALTER PARTITION FUNCTION EZ_PartitionFunction() SPLIT RANGE (6000)
-- Removing that boundary again: MERGE collapses the 6000 range.
ALTER PARTITION FUNCTION EZ_PartitionFunction() MERGE RANGE (6000)
-- Re-add the 6000 boundary (same two steps as above).
ALTER PARTITION SCHEME EZ_PartitionScheme NEXT USED [FG6]
ALTER PARTITION FUNCTION EZ_PartitionFunction() SPLIT RANGE (6000)
-- Removing a boundary out of sequence: merge 5000 away, then split it back
-- onto FG5.
ALTER PARTITION FUNCTION EZ_PartitionFunction ()MERGE RANGE (5000)
ALTER PARTITION SCHEME EZ_PartitionScheme NEXT USED [FG5]
ALTER PARTITION FUNCTION EZ_PartitionFunction() SPLIT RANGE (5000)
-- Return to master so the next script can drop PartitionDB without this
-- session blocking it.
USE master
GO
Note that the USE Master at the bottom is so that in the next script (which I had in different files/tabs) when you drop the DB, you don't have an error about someone being connected.
Script 2 was used to demonstrate large volume of data being merged and split and how quickly the data goes from one table to another when you use the ALTER TABLE - SWITCH. When I gave the presentation, I had a subfolder created previously that had a Database with 8 Million rows already created. This won't work over the blog, so you'll need to create the table and dump in the 8M rows yourself....So I'm taking out the part about dropping the table and attaching the other database.
/**************************************
** Database Partitioning Script **
** Eric Zierdt, W3i **
** http://ericemployed.blogspot.com **
** http://blog.w3i.com **
**************************************/
/*** DROP TABLE AND PARTITION ***/
-- Tear down script 1's objects in dependency order:
-- table -> scheme -> function -> file -> filegroup.
USE PartitionDB
DROP TABLE EZ_PartionedTable
DROP PARTITION SCHEME EZ_PartitionScheme
DROP PARTITION FUNCTION EZ_PartitionFunction
ALTER DATABASE [PartitionDB] REMOVE FILE [FG6_data]
GO
ALTER DATABASE [PartitionDB] REMOVE FILEGROUP [FG6]
GO
/*** CREATE NEW PARTITION FUNCTION BASED ON MILLION RECORDS ***/
-- Same shape as script 1, but with boundaries at every million rows.
CREATE PARTITION FUNCTION EZ_PartitionFunction (INT) AS
RANGE RIGHT FOR VALUES (1000000, 2000000, 3000000, 4000000, 5000000);
/*** CREATE NEW PARTITION SCHEME ***/
CREATE PARTITION SCHEME EZ_PartitionScheme AS
PARTITION EZ_PartitionFunction TO([FGDefault],[FG1], [FG2], [FG3], [FG4], [FG5])
GO
/*** CREATE NEW TABLE ***/
-- IDENTITY seed 1000000 so generated IDs start in the second partition.
CREATE TABLE EZ_PartionedTable
( ID int NOT NULL IDENTITY (1000000, 1),
Name VARCHAR(50)
)
ON EZ_PartitionScheme(ID)
/*** ADD PK TO TABLE ***/
-- The clustered PK is created ON the partition scheme, so the index is
-- partition-aligned (required for the SWITCH operations later).
ALTER TABLE dbo.EZ_PartionedTable ADD CONSTRAINT
PK_EZ_PartionedTable PRIMARY KEY CLUSTERED
(
ID
) WITH( STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON EZ_PartitionScheme(ID)
GO
/*** LOAD 8 MILLION RECORDS (set-based; far faster than 8M single-row inserts) ***/
-- The original WHILE loop issued 8 million autocommitted single-row INSERTs.
-- One set-based INSERT produces the identical rows (NAME = 'Test', IDs from
-- the table's IDENTITY seed) in a single statement.  The cross join of the
-- catalog view gives ample rows for TOP to draw from.
-- NOTE: this runs as one transaction, so the log grows; the shrink steps
-- below reclaim it.
INSERT INTO EZ_PartionedTable (NAME)
SELECT TOP (8000000) 'Test'
FROM sys.all_columns AC1
CROSS JOIN sys.all_columns AC2
/*** ADD A NEW FILEGROUP TO THE DATABASE ***/
USE [master]
GO
ALTER DATABASE [PartitionDB] ADD FILEGROUP [FG6]
GO
ALTER DATABASE [PartitionDB] ADD FILE ( NAME = N'FG6_data', FILENAME = N'C:\PartitionDB\FG6.ndf' , SIZE = 25MB , FILEGROWTH = 5MB )
TO FILEGROUP [FG6]
GO
USE PartitionDB
GO
/*** ADD A NEW FILEGROUP TO THE PARTITION ***/
ALTER PARTITION SCHEME EZ_PartitionScheme NEXT USED [FG6]
-- Splitting a populated range moves every affected row, which is fully logged.
ALTER PARTITION FUNCTION EZ_PartitionFunction() SPLIT RANGE (6000000)
--NOTE THE LOG WILL GROW SIGNIFICANTLY (look in your data directory, the log will grow to around 1.2 GB)
--The point of this is to say, that you need to be careful when using SPLIT and MERGE..doing it to an empty range is ok, but if the range has data, it's dangerous
/*** SHRINK LOG ***/
USE [PartitionDB]
GO
DBCC SHRINKFILE (N'db_log' , 1)
GO
/*** REMOVE FILEGROUP FROM THE PARTITION ***/
-- Merging the populated 6000000 range is just as log-heavy as the split was.
ALTER PARTITION FUNCTION EZ_PartitionFunction ()MERGE RANGE (6000000)
/*** SHRINK LOG ***/
USE [PartitionDB]
GO
DBCC SHRINKFILE (N'db_log' , 1)
GO
/*** SHRINK THE FG6 DATA FILE BACK TO 25MB (the original header said "SHRINK LOG") ***/
USE [PartitionDB]
GO
DBCC SHRINKFILE (N'FG6_data' , 25)
GO
/*** CREATE STAGE TABLE FOR SWITCH ***/
-- The stage table must match the main table exactly (schema, PK, partition
-- scheme) for ALTER TABLE ... SWITCH to be allowed.
CREATE TABLE EZ_PartionedTable_Stage
( ID int NOT NULL IDENTITY (1000000, 1),
Name VARCHAR(50)
)
ON EZ_PartitionScheme(ID)
ALTER TABLE dbo.EZ_PartionedTable_Stage ADD CONSTRAINT
PK_EZ_PartionedTable_Stage PRIMARY KEY CLUSTERED
(
ID
) WITH( STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON EZ_PartitionScheme(ID)
GO
--SHOW View Table Partition Data and explain what SWITCH DOES
-- SWITCH is a metadata-only operation: the partition's pages change owner
-- instantly, with no row movement.
ALTER TABLE EZ_PartionedTable SWITCH PARTITION 6 TO EZ_PartionedTable_Stage PARTITION 6
ALTER TABLE EZ_PartionedTable_Stage SWITCH PARTITION 6 TO EZ_PartionedTable PARTITION 6
--SHOW Dropping of data and removing old file group.
ALTER TABLE EZ_PartionedTable SWITCH PARTITION 6 TO EZ_PartionedTable_Stage PARTITION 6
DROP TABLE EZ_PartionedTable_Stage
ALTER DATABASE [PartitionDB] REMOVE FILE [FG6_data]
GO
ALTER DATABASE [PartitionDB] REMOVE FILEGROUP [FG6]
GO
/*** SHRINK THE FG5 DATA FILE BACK TO 25MB (the original header said "SHRINK LOG") ***/
USE [PartitionDB]
GO
DBCC SHRINKFILE (N'FG5_data' , 25)
GO
USE master
GO
Script 3 is a real life example of how my company uses Partitioning. It uses a date based range; and shows the process we use to move and drop the oldest data:
/**************************************
** Database Partitioning Script **
** Eric Zierdt, W3i **
** http://ericemployed.blogspot.com **
** http://blog.w3i.com **
**************************************/
-- Rebuild the demo database from scratch for the date-based example.
-- SINGLE_USER WITH ROLLBACK IMMEDIATE kicks out other sessions so the DROP
-- cannot be blocked.
USE master
GO
ALTER DATABASE [PartitionDB] SET SINGLE_USER WITH ROLLBACK IMMEDIATE
GO
USE [master]
GO
DROP DATABASE PartitionDB
GO
/*** CREATE DB WITH FILEGROUP NAMES THAT ARE DATE BASED - AUGUST, SEPTEMBER AND OCTOBER 2010 ***/
CREATE DATABASE PartitionDB ON
PRIMARY(NAME = db_data, FILENAME = 'c:\PartitionDB\db.mdf' ,SIZE = 4MB)
, FILEGROUP TRANDTA_201008 ( NAME = TRANDTA_201008, FILENAME = 'c:\PartitionDB\TRANDTA_201008.ndf' ,SIZE = 25MB)
, FILEGROUP TRANDTA_201009( NAME = TRANDTA_201009, FILENAME = 'c:\PartitionDB\TRANDTA_201009.ndf', SIZE = 25MB)
, FILEGROUP TRANDTA_201010( NAME = TRANDTA_201010, FILENAME = 'c:\PartitionDB\TRANDTA_201010.ndf', SIZE = 25MB)
, FILEGROUP TRANDEFAULT( NAME = TRANDEFAULT_data, FILENAME = 'c:\PartitionDB\FGDefault.ndf', SIZE = 25MB)
LOG ON( NAME = db_log, FILENAME = 'c:\PartitionDB\log.ldf', SIZE = 1MB,FILEGROWTH = 100MB ) ;
GO
USE PartitionDB
GO
/*** MAKE A PARTITION FUNCTION WITH RANGES FOR 08,09,10 2010 ***/
-- RANGE RIGHT on DATETIME: each first-of-month boundary belongs to that
-- month's partition, so a month's partition is [first, next-first).
CREATE PARTITION FUNCTION [EZ_PartitionFunction](DATETIME) AS RANGE RIGHT FOR VALUES (
N'2010-08-01'
, N'2010-09-01'
, N'2010-10-01')
GO
/*** BIND THE SCHEME TO THE FUNCTION ***/
-- TRANDEFAULT receives everything before 2010-08-01.
CREATE PARTITION SCHEME EZ_PartitionScheme AS
PARTITION EZ_PartitionFunction TO(
[TRANDEFAULT]
, [TRANDTA_201008]
, [TRANDTA_201009]
, [TRANDTA_201010])
GO
/*** CREATE A TABLE AND BIND IT TO THE PARTITION ***/
CREATE TABLE EZ_PartionedTable
( ID int,
DateAdded DATETIME PRIMARY KEY,
Name VARCHAR(50)
)
ON EZ_PartitionScheme(DateAdded)
/*** INSERT SOME DATA INTO THE NEW TABLE, 3 RECORDS PER MONTH ***/
INSERT INTO EZ_PartionedTable (ID,DateAdded, NAME)
VALUES (1,'08/01/2010', 'Test')
,(2,'08/10/2010','Test')
,(3,'08/31/2010','Test')
,(4,'09/01/2010','Test')
,(5,'09/15/2010','Test')
,(6,'09/30/2010 23:59:59','Test')
,(7,'10/01/2010','Test')
,(8,'10/07/2010','Test')
,(9,'10/22/2010','Test')
/*** On around the 25th of each month, we fire off an agent job that calls a
proc that makes a new month filegroup, file, function and scheme ***/
USE [master]
GO
ALTER DATABASE [PartitionDB] ADD FILEGROUP [TRANDTA_201011]
GO
ALTER DATABASE [PartitionDB] ADD FILE ( NAME = N'TRANDTA_201011', FILENAME = N'C:\PartitionDB\TRANDTA_201011.ndf' , SIZE = 25MB , FILEGROWTH = 5MB )
TO FILEGROUP [TRANDTA_201011]
GO
USE PartitionDB
GO
-- SPLIT before the month begins, while the new range is still empty, so no
-- data has to move.
ALTER PARTITION SCHEME EZ_PartitionScheme NEXT USED [TRANDTA_201011]
ALTER PARTITION FUNCTION EZ_PartitionFunction() SPLIT RANGE ('11/01/2010')
/*** INSERT A RECORD FOR OCTOBER AND CHECK THE VIEW PARTITION SCRIPT ***/
INSERT INTO EZ_PartionedTable (ID,DateAdded, NAME)
VALUES (1,'10/31/2010', 'Test')
/*** INSERT A RECORD FOR NOVEMBER AND CHECK THE VIEW PARTITION SCRIPT ***/
INSERT INTO EZ_PartionedTable (ID,DateAdded, NAME)
VALUES (2,'11/01/2010', 'Test')
/*** Within the first few days of the month we verify that the partitions are correct, then we run a proc that does this: ***/
-- MAKE A STAGING TABLE THAT IS EXACTLY THE SAME AS THE MAIN TABLE
-- (schema, PK, and partition scheme must match for SWITCH to succeed)
CREATE TABLE EZ_PartionedTable_Stage
( ID int,
DateAdded DATETIME PRIMARY KEY,
Name VARCHAR(50)
)
ON EZ_PartitionScheme(DateAdded)
/*** SWITCH OUT THE OLDEST MONTH'S PARTITION DATA TO THE STAGING TABLE ***/
-- Partition 2 is August 2010 (partition 1 is the open-ended TRANDEFAULT range).
ALTER TABLE EZ_PartionedTable SWITCH PARTITION 2 TO EZ_PartionedTable_Stage PARTITION 2
/**** BCP OUT DATA (NOT SHOWN HERE) ****/
/*** DROP STAGING DATA (AND ALL THE DATA FROM THE OLDEST MONTH WITH IT) ***/
DROP TABLE EZ_PartionedTable_Stage
/*** DROP THE FUNCTION RANGE FOR THE OLDEST MONTH ***/
-- The August range is empty after the switch, so this MERGE moves no rows.
ALTER PARTITION FUNCTION EZ_PartitionFunction() MERGE RANGE ('8/01/2010')
GO
/*** DROP THE FILEGROUP AND FILE FOR THE OLDEST MONTH ***/
ALTER DATABASE [PartitionDB] REMOVE FILE [TRANDTA_201008]
GO
ALTER DATABASE [PartitionDB] REMOVE FILEGROUP [TRANDTA_201008]
GO
--DMV's that are useful:
SELECT *
FROM sys.partition_functions
SELECT *
FROM sys.partition_schemes
-- Joins to Function on function_id
SELECT *
FROM sys.partition_range_values
-- Joins to Function on function_id
SELECT *
FROM sys.indexes
-- Joins to schemes on data_space_id
SELECT *
FROM sys.data_spaces
-- Joins to indexes on data_space_id
SELECT *
FROM sys.filegroups
-- Joins to indexes on data_space_id
The final script was a script to show how many records are in each filegroup/range; I find it is very useful for me for many different situations.
/**************************************
** Database Partitioning Script **
** Eric Zierdt, W3i **
** http://ericemployed.blogspot.com **
** http://blog.w3i.com **
**************************************/
USE PartitionDB
-- Per-partition row counts with filegroup and boundary values.
-- Narrow the output by setting LIKE patterns for table and/or filegroup name.
DECLARE @TableName VARCHAR(150) = '%%'
       ,@FileGroup VARCHAR(150) = '%%'

SELECT  OBJECT_NAME(P.object_id) AS TableName
       ,IX.name                  AS IndexName
       ,P.partition_number       AS PartitionNumber
       ,FG.name                  AS FileGroupName
       ,BndLo.value              AS LowerBoundaryValue
       ,BndHi.value              AS UpperBoundaryValue
       ,P.rows                   AS Rows
FROM sys.partitions P
JOIN sys.indexes IX
    ON IX.object_id = P.object_id
   AND IX.index_id  = P.index_id
JOIN sys.data_spaces DS
    ON DS.data_space_id = IX.data_space_id
JOIN sys.partition_schemes PS
    ON PS.data_space_id = DS.data_space_id
JOIN sys.partition_functions PF
    ON PF.function_id = PS.function_id
JOIN sys.destination_data_spaces DDS
    ON DDS.partition_scheme_id = PS.data_space_id
   AND DDS.destination_id      = P.partition_number
JOIN sys.filegroups FG
    ON FG.data_space_id = DDS.data_space_id
-- Boundary N-1 is the partition's lower edge, boundary N its upper edge;
-- LEFT JOINs because the first/last partitions are open-ended.
LEFT JOIN sys.partition_range_values BndLo
    ON BndLo.function_id = PS.function_id
   AND BndLo.boundary_id = P.partition_number - 1
LEFT JOIN sys.partition_range_values BndHi
    ON BndHi.function_id = PS.function_id
   AND BndHi.boundary_id = P.partition_number
WHERE OBJECTPROPERTY(P.object_id, 'ISMSShipped') = 0
  AND OBJECT_NAME(P.object_id) LIKE @TableName
  AND FG.name LIKE @FileGroup
ORDER BY TableName, IndexName, PartitionNumber;
USE master
Thursday, July 8, 2010
Log Shipping error
I had a problem where I noticed on my primary log shipping machine (the one the data originates on) that the LS_Alerts job was failing. It said that a new database we were working on was failing. I also noticed that I had two jobs that didn't belong on that machine...I had a LS_Copy and a LS_Restore...which should have been on the secondary machine. I believe that another admin set it up from the development machine during the initial phase for some reason, but I couldn't find a source machine. I deleted the two jobs from the Job manager, but the Alert job still showed failures when run. I decided to figure out what was the problem, and this is what I did to look into it:
I saw that the alert job ran the proc sys.sp_check_log_shipping_monitor_alert, so I opened up the proc to see what it was doing. It calls the following:
and raises an error if there are records.
When I ran it on my primary machine the second statement (from log_shipping_monitor_secondary) had a record in it; but the odd thing was that when I just did a select * against the table, I saw that the primary server for that statement was the same server as the secondary server...so it was set to logship to itself. From that point, it was simple: delete the record and the error went away.
Hope that helps someone in the future.
I saw that the alert job ran the proc sys.sp_check_log_shipping_monitor_alert, so I opened up the proc to see what it was doing. It calls the following:
-- Excerpt from sys.sp_check_log_shipping_monitor_alert: each branch returns a
-- row per database whose last backup/restore is older than its threshold; the
-- proc raises error 14420/14421 when this UNION returns any rows.
-- Primary side: databases whose last log backup is overdue.
(select primary_server
,primary_database
,isnull(threshold_alert, 14420)
,backup_threshold
,datediff(minute, last_backup_date_utc, @curdate_utc)
,cast(0 as int)
from msdb.dbo.log_shipping_monitor_primary
where threshold_alert_enabled = 1
and datediff(minute, last_backup_date_utc, @curdate_utc) > backup_threshold)
union
-- Secondary side: databases whose last restore is overdue, or whose restore
-- latency exceeds the threshold.
(select secondary_server
,secondary_database
,isnull(threshold_alert, 14421)
,restore_threshold
,datediff(minute, last_restored_date_utc, @curdate_utc)
,isnull(last_restored_latency,0)
from msdb.dbo.log_shipping_monitor_secondary
where threshold_alert_enabled = 1
and (datediff(minute, last_restored_date_utc, @curdate_utc) > restore_threshold
or last_restored_latency > restore_threshold))
and raises an error if there are records.
When I ran it on my primary machine the second statement (from log_shipping_monitor_secondary) had a record in it; but the odd thing was that when I just did a select * against the table, I saw that the primary server for that statement was the same server as the secondary server...so it was set to logship to itself. From that point, it was simple: delete the record and the error went away.
Hope that helps someone in the future.
Labels:
Log Shipping,
SQL,
SQL Server 2008,
SS2008,
TSQL
Subscribe to:
Posts (Atom)