2011. 3. 24. 17:53 Brain Trainning/DataBase
Source: http://support.microsoft.com/kb/246133
http://support.microsoft.com/kb/918992
 

USE master
GO
IF OBJECT_ID ('sp_hexadecimal') IS NOT NULL
  DROP PROCEDURE sp_hexadecimal
GO
CREATE PROCEDURE sp_hexadecimal
    @binvalue varbinary(256),
    @hexvalue varchar(514) OUTPUT
AS
DECLARE @charvalue varchar(514)
DECLARE @i int
DECLARE @length int
DECLARE @hexstring char(16)
SELECT @charvalue = '0x'
SELECT @i = 1
SELECT @length = DATALENGTH (@binvalue)
SELECT @hexstring = '0123456789ABCDEF'
WHILE (@i <= @length)
BEGIN
  DECLARE @tempint int
  DECLARE @firstint int
  DECLARE @secondint int
  SELECT @tempint = CONVERT(int, SUBSTRING(@binvalue, @i, 1))
  SELECT @firstint = FLOOR(@tempint/16)
  SELECT @secondint = @tempint - (@firstint*16)
  SELECT @charvalue = @charvalue +
    SUBSTRING(@hexstring, @firstint+1, 1) +
    SUBSTRING(@hexstring, @secondint+1, 1)
  SELECT @i = @i + 1
END

SELECT @hexvalue = @charvalue
GO

IF OBJECT_ID ('sp_help_revlogin') IS NOT NULL
  DROP PROCEDURE sp_help_revlogin
GO
CREATE PROCEDURE sp_help_revlogin @login_name sysname = NULL AS
DECLARE @name sysname
DECLARE @type varchar(1)
DECLARE @hasaccess int
DECLARE @denylogin int
DECLARE @is_disabled int
DECLARE @PWD_varbinary varbinary(256)
DECLARE @PWD_string varchar(514)
DECLARE @SID_varbinary varbinary(85)
DECLARE @SID_string varchar(514)
DECLARE @tmpstr varchar(1024)
DECLARE @is_policy_checked varchar(3)
DECLARE @is_expiration_checked varchar(3)

DECLARE @defaultdb sysname

IF (@login_name IS NULL)
  DECLARE login_curs CURSOR FOR
    SELECT p.sid, p.name, p.type, p.is_disabled, p.default_database_name, l.hasaccess, l.denylogin
    FROM sys.server_principals p LEFT JOIN sys.syslogins l
      ON ( l.name = p.name )
    WHERE p.type IN ( 'S', 'G', 'U' ) AND p.name <> 'sa'
ELSE
  DECLARE login_curs CURSOR FOR
    SELECT p.sid, p.name, p.type, p.is_disabled, p.default_database_name, l.hasaccess, l.denylogin
    FROM sys.server_principals p LEFT JOIN sys.syslogins l
      ON ( l.name = p.name )
    WHERE p.type IN ( 'S', 'G', 'U' ) AND p.name = @login_name
OPEN login_curs

FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @type, @is_disabled, @defaultdb, @hasaccess, @denylogin
IF (@@fetch_status = -1)
BEGIN
  PRINT 'No login(s) found.'
  CLOSE login_curs
  DEALLOCATE login_curs
  RETURN -1
END
SET @tmpstr = '/* sp_help_revlogin script '
PRINT @tmpstr
SET @tmpstr = '** Generated ' + CONVERT (varchar, GETDATE()) + ' on ' + @@SERVERNAME + ' */'
PRINT @tmpstr
PRINT ''
WHILE (@@fetch_status <> -1)
BEGIN
  IF (@@fetch_status <> -2)
  BEGIN
    PRINT ''
    SET @tmpstr = '-- Login: ' + @name
    PRINT @tmpstr
    IF (@type IN ( 'G', 'U'))
    BEGIN -- NT authenticated account/group
      SET @tmpstr = 'CREATE LOGIN ' + QUOTENAME( @name ) + ' FROM WINDOWS WITH DEFAULT_DATABASE = [' + @defaultdb + ']'
    END
    ELSE BEGIN -- SQL Server authentication
      -- obtain password and sid
      SET @PWD_varbinary = CAST( LOGINPROPERTY( @name, 'PasswordHash' ) AS varbinary (256) )
      EXEC sp_hexadecimal @PWD_varbinary, @PWD_string OUT
      EXEC sp_hexadecimal @SID_varbinary, @SID_string OUT

      -- obtain password policy state
      SELECT @is_policy_checked = CASE is_policy_checked WHEN 1 THEN 'ON' WHEN 0 THEN 'OFF' ELSE NULL END FROM sys.sql_logins WHERE name = @name
      SELECT @is_expiration_checked = CASE is_expiration_checked WHEN 1 THEN 'ON' WHEN 0 THEN 'OFF' ELSE NULL END FROM sys.sql_logins WHERE name = @name

      SET @tmpstr = 'CREATE LOGIN ' + QUOTENAME( @name ) + ' WITH PASSWORD = ' + @PWD_string + ' HASHED, SID = ' + @SID_string + ', DEFAULT_DATABASE = [' + @defaultdb + ']'

      IF ( @is_policy_checked IS NOT NULL )
      BEGIN
        SET @tmpstr = @tmpstr + ', CHECK_POLICY = ' + @is_policy_checked
      END
      IF ( @is_expiration_checked IS NOT NULL )
      BEGIN
        SET @tmpstr = @tmpstr + ', CHECK_EXPIRATION = ' + @is_expiration_checked
      END
    END
    IF (@denylogin = 1)
    BEGIN -- login is denied access
      SET @tmpstr = @tmpstr + '; DENY CONNECT SQL TO ' + QUOTENAME( @name )
    END
    ELSE IF (@hasaccess = 0)
    BEGIN -- login exists but does not have access
      SET @tmpstr = @tmpstr + '; REVOKE CONNECT SQL TO ' + QUOTENAME( @name )
    END
    IF (@is_disabled = 1)
    BEGIN -- login is disabled
      SET @tmpstr = @tmpstr + '; ALTER LOGIN ' + QUOTENAME( @name ) + ' DISABLE'
    END
    PRINT @tmpstr
  END

  FETCH NEXT FROM login_curs INTO @SID_varbinary, @name, @type, @is_disabled, @defaultdb, @hasaccess, @denylogin
END
CLOSE login_curs
DEALLOCATE login_curs
RETURN 0
GO

EXEC sp_help_revlogin

posted by LifeisSimple
2011. 3. 23. 13:50 Brain Trainning/Storage

Source: http://www.sqlteam.com/article/benchmarking-disk-io-performance-size-matters

Benchmarking Disk I/O Performance: Size Matters!

By Linchi Shea on 30 October 2007 | 9 Comments | Tags: Performance Tuning, Disk Tuning


This article discusses the impact of test file size when benchmarking SQL Server disk performance. (Editor's Note: This is our first article by Linchi Shea. Linchi is a SQL Server MVP and an expert on SQL Server hard drive performance. The article addresses the impact of test file size when using SQLIO.EXE to test hard drive performance and how an incorrect file size can give misleading results.)

If you use a tool such as IOMeter or sqlio.exe--both of which are freely downloadable--to benchmark the I/O performance of a disk, you typically would configure the tool to generate I/Os against a test file that is placed on the disk.

For instance, with sqlio.exe the test file is specified on the command line as follows:

sqlIO -kR -t1 -s30 -dE -o32 -frandom -b8 -BH -LS testfile.dat

In this case, you are benchmarking 8K random reads against the testfile.dat file on the E drive. The sqlio.exe utility will issue the 8K read requests from a single thread and will maintain an I/O queue depth of 32.

The question is: Does the size of the test file matter? If so, what size should you pick?

Benchmark results may lie

I was interested in comparing the 8K random reads performance between the E drive and the F drive on one of my machines. So, I first ran the above command line, and that gave me the following results:

E drive:

IOs/sec: 2,681
MBs/sec: 21
Min_Latency (ms): 0
Avg_Latency (ms): 11
Max_Latency (ms): 310

I then changed the -d parameter value from E to F, and re-ran the command line, and obtained the following results:

F drive:

IOs/sec: 19,871
MBs/sec: 155
Min_Latency (ms): 0
Avg_Latency (ms): 1
Max_Latency (ms): 16

From these results, it is more than obvious that the F drive was far more performant than the E drive, at least for 8K random reads.

Unfortunately, that conclusion would be terribly far from the truth!

"Wait a minute and stop right there! Are you telling me that a drive that can do ~20,000 reads per second is not significantly better than a drive that does only 2600 reads a second?"

Well, that's not what I'm telling you. What I failed to disclose in the little comparison tests described above was that the test file on the F drive was 256MB in size, whereas the test file on the E drive was 128GB in size. It turns out that the size of the test file has such a dramatic impact on the resulting I/O metrics that I was literally comparing apples and oranges.

Running the same sqlio.exe command line against a 256MB test file on the E drive produced the following results:

IOs/sec: 16,635
MBs/sec: 130
Min_Latency (ms): 0
Avg_Latency (ms): 1
Max_Latency (ms): 29

The F drive still performed better than the E drive. But instead of being eight times better, the F drive was about 20% better in I/Os per second.

The impact of test file size

To drive home the fact that the size of the test file can have an overwhelming impact on the disk I/O benchmark results, I ran the following identical command line repeatedly with the testfile.dat set to different sizes:

sqlIO -kR -t1 -s30 -dE -o32 -frandom -b8 -BH -LS testfile.dat

The sizes of testfile.dat I tested included the following:

  • 256MB
  • 512MB
  • 2GB
  • 8GB
  • 32GB
  • 64GB
  • 128GB

The chart below illustrates the impact on I/Os per second when the size of the test file was the only parameter changed:

For different disk drives, the shape of the curve and the inflection point will differ. But make no mistake: it is meaningless to state a disk I/O metric without also disclosing the size of the test file.

Why size matters

Among many factors that may help render the disk benchmark results sensitive to test file size, the following two are worth noting:

  • Disk seek area
  • Cache size

Before explaining why these two factors are significant, let me note that when you configure sqlio.exe to do random reads (or writes) there is no data locality. In other words, the I/O requests will not concentrate on a subset of the data, but will be distributed over the entire file.

Disk seek area. A larger file generally occupies a larger area on the disk platter than does a smaller file. For random access, this means that the disk arm has to cover more tracks, leading to a higher seek time on average. Since seek time is often the most significant time-consuming component in processing an I/O request, average I/O performance is therefore expected to drop when you increase the test file size.

Cache size. If an I/O request is satisfied with data already in the cache of a disk subsystem, seek time is no longer a factor. In addition, the more I/O requests that are satisfied only from the cache, the higher the average I/O performance. And the smaller the test file relative to the cache, the more likely the requested data will be found in the cache. It therefore follows that the smaller the test file the higher the average I/O performance.

What size to use?

This is a frequently asked question, but unfortunately there is no one-size-fits-all answer. If you read the literature on this topic, you will no doubt find that the most common recommendation is to use a test file that is larger than the cache size of the disk subsystem being benchmarked.

This is generally a good recommendation, but not an absolute truth. It is generally good because as a SQL Server user, you benchmark a disk to make better use of SQL Server for your application databases, and your application databases in general rarely fit entirely into the cache of your disk subsystem.

Then, why would you ever want to use a small test file? The most obvious reason is when you are dealing with databases that are small enough to be cached, and you want to get disk benchmark numbers that are relevant to these databases. There are also specific benchmarking scenarios in which a small test file is a must. Consider the case where you want to measure the read latency between the host OS and the I/O subsystem cache. You should configure your tests to eliminate the influence of the disk media access time, and a test file that is small enough to fit into the cache will do the trick.

posted by LifeisSimple
2011. 3. 23. 11:45 Brain Trainning/Storage
Source: http://sqlblog.com/blogs/jonathan_kehayias/archive/2010/05/25/parsing-sqlio-output-to-excel-charts-using-regex-in-powershell.aspx

Parsing SQLIO Output to Excel Charts using Regex in PowerShell

Today Joe Webb (Blog|Twitter) blogged about The Power of Regex in Powershell, and in his post he shows how to parse the SQL Server Error Log for events of interest.  At the end of his blog post Joe asked about other places where Regular Expressions have been useful in PowerShell so I thought I’d blog my script for parsing SQLIO output using Regex in PowerShell, to populate an Excel worksheet and build charts based on the results automatically.

If you’ve never used SQLIO, Brent Ozar (Blog|Twitter) has an article on SQLServerPedia titled SAN Performance Tuning with SQLIO that includes a lot of information as well as a video tutorial on its use.  I have been using SQLIO quite a bit this year, mainly to gather performance information for systems before and after reconfiguring their storage, to show percent-impact information to the business for changes in storage like migrating from RAID 5 to 10 and aligning the disk partitions used for database and log file storage.  I have also used it in benchmarking new SAN configurations and the SAN configuration of our Virtual Machine environment to show team members why I have recommended that a number of our physical servers be migrated onto Virtual Machines.

I generally use a param.txt file similar to:

G:\MSSQL\testfile1.dat 4 0x0 32768

which creates a 32GB test file and then have a DOS batch file that runs the actual SQLIO tests using a script like:

sqlio -kW -t16 -s10 -o8 -fsequential -b8 -BH -LS -Fparam.txt

sqlio -kW -t8 -s360 -o8 -frandom -b8 -BH -LS G:\MSSQL\testfile1.dat > post.txt 
sqlio -kW -t8 -s360 -o8 -frandom -b32 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -frandom -b64 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -frandom -b128 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -frandom -b256 -BH -LS G:\MSSQL\testfile1.dat >> post.txt

sqlio -kR -t8 -s360 -o8 -frandom -b8 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -frandom -b32 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -frandom -b64 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -frandom -b128 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -frandom -b256 -BH -LS G:\MSSQL\testfile1.dat >> post.txt

sqlio -kW -t8 -s360 -o8 -fsequential -b8 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -fsequential -b32 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -fsequential -b64 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -fsequential -b128 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kW -t8 -s360 -o8 -fsequential -b256 -BH -LS G:\MSSQL\testfile1.dat >> post.txt

sqlio -kR -t8 -s360 -o8 -fsequential -b8 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -fsequential -b32 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -fsequential -b64 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -fsequential -b128 -BH -LS G:\MSSQL\testfile1.dat >> post.txt 
sqlio -kR -t8 -s360 -o8 -fsequential -b256 -BH -LS G:\MSSQL\testfile1.dat >> post.txt

This runs the gamut of tests using 8, 32, 64, 128, and 256K block sizes, with 8 threads and 8 outstanding I/Os per thread.  You can certainly expand the test set much further than this and do a very exhaustive testing regimen; I am by no means saying that my tests above are the best fit, but they have sufficiently met my needs for the tuning I have been doing.  Once the tests have been run, the post.txt file will contain an output entry similar to the following for every test that was run.

sqlio v1.5.SG 
using system counter for latency timings, 14318180 counts per second 
16 threads writing for 60 secs to file d:\MSSQL\testfile1.dat 
    using 8KB random IOs 
    enabling multiple I/Os per thread with 8 outstanding 
    buffering set to use hardware disk cache (but not file cache) 
using current size: 2048 MB for file: d:\MSSQL\testfile1.dat 
initialization done 
CUMULATIVE DATA: 
throughput metrics: 
IOs/sec: 18527.91 
MBs/sec:   144.74 
latency metrics: 
Min_Latency(ms): 0 
Avg_Latency(ms): 6 
Max_Latency(ms): 4900 
histogram: 
ms: 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24+ 
%: 78  6  8  1  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  2 

Linchi Shea (Blog) wrote a blog post three years ago that showed how to Parse the sqlio.exe Output using Perl.  I found this in my search for a smarter way of parsing these long output files I was generating other than manually, but I don’t have Perl installed anywhere and I really felt that I should be able to do this natively using the tools readily available to me on my Windows 7 laptop, so I looked to PowerShell for a solution. 

Reading the file data in is accomplished using Get-Content, which returns a collection of strings.  At first I tried casting this to a [string] object directly, but soon found out that when I did this the resulting string was missing the carriage returns and line feeds that existed in the original file.  This posed a couple of problems for me in parsing the outputs initially, but doing a [string]::Join operation using [Environment]::NewLine as the separator allowed an appropriately CRLF-delimited string to be returned. 

$filedata = [string]::Join([Environment]::NewLine,(Get-Content $FileName))

Now that I had my file data loaded into the $filedata object, the next step was to split this string based on the “sqlio v1.5.SG” header that is output at the beginning of each test run.  Since PowerShell is .NET based, this is easily accomplished by using the Split() method of the System.String object, which is the underlying System.Type of the $filedata object.

$Results = $filedata.Split( [String[]]"sqlio v1.5.SG", [StringSplitOptions]::RemoveEmptyEntries )

At first I started to write a foreach loop with this object (let's face it, old habits die hard), but instead I made use of piping, which allows a collection like the one returned by the Split() method to be worked on in a single operation.  Piping in PowerShell is similar to a manufacturing process where an object gets transformed and then passed on until the ultimate result is achieved.  Once an object is piped, it becomes accessible through the $_ variable name, allowing the object to be transformed along the pipeline.  By piping the result of the Split() I was able to write a select statement that utilized Regex to get the information of interest out of each test run.

     select @{Name="Threads"; Expression={[int]([regex]::Match($_, "(\d+)?\sthreads\s(reading|writing)").Groups[1].Value)}},` 
                @{Name="Operation"; Expression={switch ([regex]::Match($_, "(\d+)?\sthreads\s(reading|writing)").Groups[2].Value) 
                                                { 
                                                    "reading" {"Read"} 
                                                    "writing" {"Write"} 
                                                }    }},` 
                @{Name="Duration"; Expression={[int]([regex]::Match($_, "for\s(\d+)?\ssecs").Groups[1].Value)}},` 
                @{Name="IOSize"; Expression={[int]([regex]::Match($_, "\tusing\s(\d+)?KB\s(sequential|random)").Groups[1].Value)}},` 
                @{Name="IOType"; Expression={switch ([regex]::Match($_, "\tusing\s(\d+)?KB\s(sequential|random)").Groups[2].Value) 
                                                { 
                                                    "random" {"Random"} 
                                                    "sequential" {"Sequential"} 
                                                }  }},` 
                @{Name="PendingIO"; Expression={[int]([regex]::Match($_, "with\s(\d+)?\soutstanding").Groups[1].Value)}},` 
                @{Name="FileSize"; Expression={[int]([regex]::Match($_, "\s(\d+)?\sMB\sfor\sfile").Groups[1].Value)}},` 
                @{Name="IOPS"; Expression={[decimal]([regex]::Match($_, "IOs\/sec\:\s+(\d+\.\d+)?").Groups[1].Value)}},` 
                @{Name="MBs_Sec"; Expression={[decimal]([regex]::Match($_, "MBs\/sec\:\s+(\d+\.\d+)?").Groups[1].Value)}},` 
                @{Name="MinLat_ms"; Expression={[int]([regex]::Match($_, "Min.{0,}?\:\s(\d+)?").Groups[1].Value)}},` 
                @{Name="AvgLat_ms"; Expression={[int]([regex]::Match($_, "Avg.{0,}?\:\s(\d+)?").Groups[1].Value)}},` 
                @{Name="MaxLat_ms"; Expression={[int]([regex]::Match($_, "Max.{0,}?\:\s(\d+)?").Groups[1].Value)}}` 

The select returns a new type of object that has properties named Threads, Operation, Duration, IOSize, IOType, PendingIO, FileSize, IOPS, MBs_sec, MinLat_ms, AvgLat_ms, and MaxLat_ms.  This alone is sufficient to proceed to creating an Excel file for the information, but I wanted the information sorted in the same manner every time I ran this, primarily because I change how I run my tests sometimes, and I like consistency in the output.  Since I am so anal retentive, er, thorough, I fixed this problem by piping the output from the select to the Sort-Object commandlet and sorting by IOSize, IOType, Operation, and Threads.

Getting the data into Excel was actually fairly simple to do, thanks to Buck Woody (Blog|Twitter) and SQL Rockstar Tom LaRock (Blog|Twitter).  Buck wrote a blog post titled Create an Excel Graph of your Big Tables – With Powershell, and Tom wrote a Simple Talk article, Monitoring SQL Server Virtual Log File Fragmentation, both of which use the Interop Excel objects to create an Excel workbook from PowerShell, populate the workbook with data, and build charts using the data.  My code for the Excel integration is based entirely on their examples.

$Excel = New-Object -ComObject Excel.Application 
$Excel.Visible = $true 
$WorkBook = $Excel.WorkBooks.Add() 
$WorkBook.WorkSheets.Item(1).Name = "RawData" 
$WorkBook.WorkSheets.Item(3).Delete() 
$WorkBook.WorkSheets.Item(2).Delete() 
$WorkSheet = $WorkBook.WorkSheets.Item("RawData") 
$x = 2 
$WorkSheet.Cells.Item(1,1) = "Threads" 
$WorkSheet.Cells.Item(1,2) = "Operation" 
$WorkSheet.Cells.Item(1,3) = "Duration" 
$WorkSheet.Cells.Item(1,4) = "IOSize" 
$WorkSheet.Cells.Item(1,5) = "IOType" 
$WorkSheet.Cells.Item(1,6) = "PendingIO" 
$WorkSheet.Cells.Item(1,7) = "FileSize" 
$WorkSheet.Cells.Item(1,8) = "IOPS" 
$WorkSheet.Cells.Item(1,9) = "MBs/Sec" 
$WorkSheet.Cells.Item(1,10) = "Min_Lat(ms)" 
$WorkSheet.Cells.Item(1,11) = "Avg_Lat(ms)" 
$WorkSheet.Cells.Item(1,12) = "Max_Lat(ms)" 
$WorkSheet.Cells.Item(1,13) = "Caption"

$Results | % { 
    $WorkSheet.Cells.Item($x,1) = $_.Threads 
    $WorkSheet.Cells.Item($x,2) = $_.Operation 
    $WorkSheet.Cells.Item($x,3) = $_.Duration 
    $WorkSheet.Cells.Item($x,4) = $_.IOSize 
    $WorkSheet.Cells.Item($x,5) = $_.IOType 
    $WorkSheet.Cells.Item($x,6) = $_.PendingIO 
    $WorkSheet.Cells.Item($x,7) = $_.FileSize 
    $WorkSheet.Cells.Item($x,8) = $_.IOPS 
    $WorkSheet.Cells.Item($x,9) = $_.MBs_Sec 
    $WorkSheet.Cells.Item($x,10) = $_.MinLat_ms 
    $WorkSheet.Cells.Item($x,11) = $_.AvgLat_ms 
    $WorkSheet.Cells.Item($x,12) = $_.MaxLat_ms 
    $WorkSheet.Cells.Item($x,13) = [string]$_.IOSize + "KB " + [string]$_.IOType + " " + ` 
                                [string]$_.Operation + " " + [string]$_.Threads + ` 
                                " Threads " + [string]$_.PendingIO + " pending" 
    $x++}

$WorkBook.Charts.Add() | Out-Null 
$Chart = $WorkBook.ActiveChart 
$Chart.SetSourceData($WorkSheet.Range("H1:H$x")) 
$Chart.SeriesCollection(1).xValues = $WorkSheet.Range("M2:M$x") 
$Chart.SetSourceData($WorkSheet.Range("H1:H$x")) 
$Chart.SeriesCollection(1).xValues = $WorkSheet.Range("M2:M$x") 
$Chart.Name = "IOPS"

$WorkBook.Charts.Add() | Out-Null 
$WorkBook.ActiveChart.SetSourceData($WorkSheet.Range("I1:I$x")) 
$Chart = $WorkBook.ActiveChart 
$Chart.SeriesCollection(1).xValues = $WorkSheet.Range("M2:M$x") 
$Chart.Name = "MBs Sec"

I don’t know why, but the first chart doesn’t refresh properly unless I call SetSourceData() and set the SeriesCollection xValues property twice.  Since the code block that loads the data into the worksheet tracks the rows with the $x variable, the Range for the chart is always row 1 to $x.  The H column happens to be the column that holds the IOPS values and the I column holds the MBs Sec values in the workbook.  The only thing missing at this point is a parameter declaration at the head of the script file to require an input parameter $FileName, which is passed to Get-Content to read the information.

param(    [Parameter(Mandatory=$TRUE)] 
        [ValidateNotNullOrEmpty()] 
        [string] 
        $FileName)

The end result is a set of charts showing your IOPS and MBs Sec results from the test run.


The complete script is attached to this blog post.  If you use it let me know your thoughts.

Published Tuesday, May 25, 2010 10:12 PM by Jonathan Kehayias

 Attachment(s): SQLIOResults.zip

posted by LifeisSimple
2011. 3. 22. 10:07 Brain Trainning/DataBase
MSSQL: Creating Subdirectories

EXECUTE master.dbo.xp_create_subdir N'Y:\BackupFolder\TestDB'

GO

 

BACKUP DATABASE [aspsearchDB] TO  DISK = N'Z:\BackupFolder\TestDB\TestDB_backup_2011_03_22_100247_1759220.bak' WITH NOFORMAT, NOINIT,  NAME = N'TestDB_backup_20##_##_##', SKIP, REWIND, NOUNLOAD, COMPRESSION,  STATS = 10

GO

 

That's all it takes. When your memory is poor, you just keep writing things down...
 



posted by LifeisSimple
2011. 3. 11. 14:30 Brain Trainning/DataBase
These are things everyone should read through at least once, but they never quite get read... I still like books better~

Source: http://technet.microsoft.com/en-us/library/cc966534.aspx


posted by LifeisSimple
2011. 3. 11. 09:43 Brain Trainning/DataBase


Top 10 SQL Server Performance Tuning Tips

December 08, 2010 03:03 PM
SQL Server Magazine, January 2011

SQL Server is a very complex product. And when it comes to performance tuning, many DBAs simply don't know where to start. There are many facets to the program and many things to consider if you are to correctly tune a SQL Server instance. Performance tuning is definitely one of those areas in which experience is the best teacher. But you need to start somewhere. And, as is common to many things, it all starts by having a good foundation to build upon.

We are going to get back to the basics of performance tuning by learning 10 tips that will help you get off on the right foot when you try to identify existing performance issues and prevent future ones.

Before we start, take a look at the “SQL Server 2005 Waits and Queues” white paper on the SQL Server CAT (Customer Advisory Team) website. It contains example code and much more detail than I can provide here. I highly recommend that you read this white paper and use it as a reference for this article.

TIP 1: STOP WAITING AROUND

Every time that SQL Server tries to do something but gets held up for any reason, it tracks the incident in the form of something known as wait statistics. (See the CAT white paper for more information about wait statistics.) This is one of the areas of SQL Server that you must begin to understand to correctly determine the resources that SQL Server is waiting on at any given moment. For example, if you find that most of your waits are related to a page_IO_latch issue, you can be pretty sure that your bottleneck has to do with I/O. And, if you see many LCK_XX type waits occur, you're seeing a blocking issue. In this case, you would spend your time more wisely by investigating the cause of the blockage instead of by looking at the I/O issues.

The CAT white paper contains plenty of detail about the different wait types, and also code examples for capturing and deciphering the results. After you have read the white paper, examine your waits from time to time so that you can stop chasing your tail and start narrowing down where the real bottleneck lies. This is, without a doubt, where you should start your performance tuning day.
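As a concrete starting point, here is a minimal sketch of a query that snapshots the top accumulated waits from sys.dm_os_wait_stats (the white paper's version filters out many benign system waits that this simple form leaves in):

SELECT TOP (10)
    wait_type,
    waiting_tasks_count,
    wait_time_ms,
    wait_time_ms / NULLIF(waiting_tasks_count, 0) AS avg_wait_ms
FROM sys.dm_os_wait_stats
ORDER BY wait_time_ms DESC;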

TIP 2: LOCATE I/O BOTTLENECKS

I/O bottlenecks are one of the key reasons why performance suffers in SQL Server. You have three relatively easy methods at your disposal to determine whether you have I/O issues:

  • Check whether you see high page_IO_latch waits or log_write waits in your wait statistics.
  • Use the DMF sys.dm_io_virtual_file_stats() to locate any areas in which you have excessive physical I/O or excessive stalls on that I/O. These issues can occur at the database level or even at the file level (a sketch of such a query follows this list).
  • Use the trusty PerfMon counters. At a minimum, use the Avg. Disk sec/Read and Avg. Disk sec/Write counters to see the latency of the reads and writes. On an OLTP system you would, ideally, want log file latency to be just a few ms and data file latency to be less than 10ms. Remember that these are ideal values; your system may tolerate larger latency and still be fine. Also keep in mind that many times when the storage subsystem cannot keep up with the current demand, the root cause is not the hardware at all but poorly tuned queries driving far more I/O than necessary.
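For example, a rough sketch of a per-file latency query against that DMF (the stall counters divided by the I/O counts approximate the average latency per operation):

SELECT DB_NAME(vfs.database_id) AS database_name,
    vfs.file_id,
    vfs.num_of_reads,
    vfs.io_stall_read_ms / NULLIF(vfs.num_of_reads, 0) AS avg_read_stall_ms,
    vfs.num_of_writes,
    vfs.io_stall_write_ms / NULLIF(vfs.num_of_writes, 0) AS avg_write_stall_ms
FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs
ORDER BY avg_write_stall_ms DESC;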

When you find that you have many physical I/O bottlenecks occurring, your first instinct should be to find the queries that are causing all the physical I/O, and then try to tune them before you add more hardware. One performance aspect that you should never ignore is high latency for log writes. If you start to hold up the writing to the log file, all further DML operations can quickly become impeded, and they’ll remain so until you alleviate the bottleneck. High latency in log writes is a sure way to hinder performance in the database. For a better understanding of storage issues and I/O subsystems in a SQL Server environment, I recommend that you read the Microsoft articles “Storage Top 10 Practices” and “SQL Server Best Practices Article.”

 

TIP 3: ROOT OUT PROBLEM QUERIES

In any given SQL Server instance, there are probably 8 to 10 queries or stored procedures that are responsible for 80 to 90 percent of the poorly tuned activity that you see throughout the day. Naturally, this isn't true for everyone, but it’s true often enough to matter. If you can identify these problem queries, and if you can prioritize tuning them, you can make a significant impact on the overall performance of your server.

The CAT white paper contains information about this subject. But one way that you can easily identify expensive statements is by using the code in Listing 1. The sys.dm_exec_query_stats DMV query contains aggregate performance statistics that are associated with each of the cached query plans in the SQL Server instance. This query easily enables you to order the results in several ways, depending on what type of resource usage you want to concentrate on. The key is to weigh the number of times that the statement was executed versus the average resource usage to better determine which statements would have the largest impact on performance if they were optimized better.

I will caution you not to put too much weight on the total elapsed time or on the overall duration of the statement because other factors, such as blocking, can influence the overall duration. But by using this query, you should be able to quickly identify the top offending statements in your system, and then prioritize the statements so that you can tune them as efficiently as possible.

 

 

TIP 4: PLAN TO REUSE

Today's database applications deal with an ever-increasing transaction rate as hardware gets more powerful and less expensive. For this reason, you are often trying to pack more and more into a single server, thus increasing the activity. But one of the most expensive operations in terms of memory and CPU usage in the SQL Server instance is the compiling of query plans. We have also seen an increase in applications that may speed development but that often result in calls to the database that make it nearly impossible to reuse the query plan. I'm talking about applications that do not use stored procedures or that do not correctly parameterize their queries. This can result in extremely poor performance, especially as the number of transactions increases. I highly recommend that you also read the “Plan Caching in SQL Server 2008” white paper at msdn.microsoft.com/en-us/library/ee343986.aspx as an excellent source for describing how SQL Server handles query plans and reuse.

The CAT white paper also goes into some detail about this subject. But the following code example is a quick and easy way to determine which statements are not reusing query plans:

 

SELECT b.[cacheobjtype], b.[objtype], b.[usecounts],    
    a.[dbid], a.[objectid], b.[size_in_bytes], a.[text]
FROM sys.dm_exec_cached_plans as b   
CROSS APPLY sys.dm_exec_sql_text(b.[plan_handle]) AS a
ORDER BY [usecounts] DESC

 

This query sorts all of the plans in the procedure cache in descending order of use counts. The use counts column is incremented every time that a plan is reused, and it lets us easily identify which plans have reuse. You can also order the plans by the text column to determine which statements have many similar entries that have a use count of one. This value indicates statements that you call often but that do not reuse the existing plan. After you have identified these statements, you can prioritize which parts of the application you must work on first to get the biggest bang for your buck in terms of plan reuse. Don’t underestimate how seriously a lack of plan reuse can affect performance as the transaction rate increases.

 

TIP 5: MONITOR INDEX USAGE

The sys.dm_db_index_operational_stats() DMF is a widely underutilized source of information. It can provide you with valuable information about your index usage. By using this DMF, you can decipher all kinds of information to determine not only which indexes are used but also how they're used. For example, are you scanning the index or are you using seeks? This DMF will tell you. It will even tell you things such as the time spent on latching and locking. Have you ever asked yourself, “Is this index being used?” or, ”Which indexes am I using for a given table?” We've all asked these questions at one time or another. So use this DMF to get a handle on your index usage.
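For example, a minimal sketch of a usage query built on this DMF (run it in the database of interest; the join to sys.indexes is only there to produce readable index names):

SELECT OBJECT_NAME(ios.object_id) AS table_name,
    i.name AS index_name,
    ios.range_scan_count,
    ios.singleton_lookup_count,
    ios.row_lock_wait_in_ms,
    ios.page_latch_wait_in_ms
FROM sys.dm_db_index_operational_stats(DB_ID(), NULL, NULL, NULL) AS ios
    JOIN sys.indexes AS i
        ON i.object_id = ios.object_id AND i.index_id = ios.index_id
ORDER BY ios.range_scan_count DESC;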

 

TIP 6: SEPARATE DATA AND LOG FILES

One of the most basic but often disregarded rules for good performance is to separate the data and the log files onto separate physical drive arrays whenever possible. This is especially true when you use DAS, but it also applies to a SAN. The key principle here is to separate the mostly random access of the data files from the sequential writes to the transaction logs. One catch peculiar to a SAN environment is that even though you are presented with different drive letters or LUNs, you cannot be sure that these represent different physical drive arrays. Often these apparent drives are carved from the same larger disk array, which defeats the intent of separating them in the first place. So make sure that you know what you're really getting when you ask for your storage on a SAN. You will be amazed at how much difference this can make as the volume of your transactions increases.

 

TIP 7: USE SEPARATE STAGING DATABASES

I see far too many production databases that are also used for scratch space when it comes to importing or exporting data. People frequently import data into a table that is real but temporary in nature. Then, they proceed to manipulate the data by performing extensive updates to prepare it for one of the final production tables. The problem with this is that most production databases are in full recovery mode. This means that practically all activity is fully logged in the transaction log, and these operations can be expensive.

By using a separate staging database within the SQL Server instance that is in simple recovery mode, you can achieve several performance benefits. One is that you can often get a minimally logged load instead of a fully logged load. This can dramatically speed up the import process to begin with. With a minimally logged load, the amount of data that's logged to the transaction log is very small in relation to what it would be for a fully logged load, so you have a much lighter burden on the server as a whole.

If you were to perform these operations in the production database, all that activity would have to use resources such as I/O, CPU, and memory. All the preparation would be fully logged in either database; but at least in the staging database, you would not have to back up the log. If this was done in the production database instead, that activity would increase the time required to back up the log. Therefore, the resulting backup files would be larger than necessary. Create a staging database, and perform your initial importing and manipulation in that database to reduce the overall burden on the server and to minimize the impact to the production databases.
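The setup itself is trivial; a minimal sketch (the database name is illustrative):

-- Create a scratch database for staging work and keep it in simple
-- recovery so bulk loads can qualify for minimal logging.
CREATE DATABASE StagingDB;
GO
ALTER DATABASE StagingDB SET RECOVERY SIMPLE;
GO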

 

TIP 8: PAY ATTENTION TO LOG FILES

Too many people underestimate the importance of the transaction log file in relation to performance. The most common mistake people make is not to leave enough free space in the transaction log file to allow for your normal operations to occur without forcing an auto-grow operation. Growing the log file can be extremely time-consuming. It can force any DML operations to wait until the growth is complete before the operation can proceed.

By making sure that you always have plenty of free space in the log file, you can avoid these performance hits altogether. The second most common mistake is to have far too many virtual log files (VLFs) in the transaction log. For an excellent explanation of this practice, see Kimberly L. Tripp's blog post, “Transaction Log VLFs - too many or too few?” (www.sqlskills.com/BLOGS/KIMBERLY/post/Transaction-Log-VLFs-too-many-or-too-few.aspx). Personally, I prefer to have the VLFs set at 512MB each. You can obtain this value by growing the log file in 8GB increments.
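For example (a sketch; the database and logical file names are illustrative, and MODIFY FILE sets a new total size, so repeat with 16384MB, 24576MB, and so on for a larger log):

-- Each growth of more than 1GB creates 16 VLFs, so an 8GB step yields ~512MB VLFs
ALTER DATABASE MyDatabase
MODIFY FILE (NAME = MyDatabase_log, SIZE = 8192MB);
GO
-- Returns one row per VLF; a quick way to count how many you have
DBCC LOGINFO ('MyDatabase');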

 

TIP 9: MINIMIZE TEMPDB CONTENTION

If your application makes heavy use of tempdb, it's possible that you could run into some contention regarding internal structures that are associated with the tempdb files. There are ways to minimize the contention, outlined in the TechNet topic “Working with tempdb in SQL Server 2005.”

This topic goes into great detail about many aspects of tempdb. However, I want to point out that the article recommends that you create one tempdb data file for each processor core. In many cases, this is excessive and can actually degrade performance. What I recommend instead is that you keep an eye on the PAGELATCH_UP waits for tempdb, and that you increase the number of files until the wait either ends or is close to zero. Just remember to make all the files exactly the same size; otherwise, you don't experience any benefit, because of the allocation algorithm that's based on the amount of free space in each file. Another thing to keep in mind is not to explicitly drop temporary tables in your code if you can help it. Let SQL Server do that on its own. In this way, you can maximize the effect of the caching enhancements that are available in recent versions of SQL Server.
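A sketch of how to watch for that contention while a workload runs: allocation-page latch waits in tempdb (database ID 2) show up in sys.dm_os_waiting_tasks with resource descriptions such as 2:1:1 (a PFS page) or 2:1:3 (an SGAM page):

SELECT session_id, wait_type, wait_duration_ms, resource_description
FROM sys.dm_os_waiting_tasks
WHERE wait_type LIKE 'PAGELATCH%'
    AND resource_description LIKE '2:%';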

 

TIP 10: CHANGE THE MAX MEMORY LIMIT

There have been improvements in the 64-bit versions of SQL Server regarding memory allocation and sharing with the OS and other applications, but I have yet to see a real-life case where leaving the MAX memory setting at the default is ideal. Even though your host server may be dedicated to SQL Server, there are always other applications or parts of the OS that require memory from time to time or even all the time. Do yourself a favor and set the MAX memory setting to at least 1 to 2GB less than the total amount of memory on the server if this is a single instance. If you have multiple instances, or if you are in a multi-instance cluster, you also have to account for that. How much memory you leave depends on what else you have running and how much memory it requires to operate efficiently. But you can adjust this value up or down over time as you see fit.
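For example (the 30720MB value assumes a dedicated 32GB single-instance server and is purely illustrative):

EXEC sp_configure 'show advanced options', 1;
RECONFIGURE;
EXEC sp_configure 'max server memory (MB)', 30720;
RECONFIGURE;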

BONUS TIP: JUST SAY NO TO SHRINKING DATA FILES

OK, this makes 11 tips. But shrinking data files has been a generally bad practice for a long time, and it can really impact performance in one of three ways. The shrinking itself can be very painful to begin with. Because it can cause a lot of fragmentation, your subsequent queries may suffer as a result. And if you don’t have Instant File Initialization turned on, the resulting growth later can also hinder performance and potentially cause time-outs. Although there are times when shrinking a file may be necessary, make sure that you know the impact before you try it. For more details, see Paul Randal's blog post “Why you should not shrink your data files.”

 

MAKE THESE TIPS YOUR OWN

By following these guidelines and keeping this information in mind, you should be able to identify the most common performance issues in SQL Server—and prevent or minimize future ones, as well. By now, you will have noticed that some of these topics will require more reading and some actual experience for you to fully grasp the concepts and techniques. But none of these topics are out-of-reach for the average DBA. And even a beginner has to start somewhere. So why not here?

 

Listing 1: The sys.dm_exec_query_stats DMV Query for Performance Statistics

SELECT
    COALESCE(DB_NAME(t.[dbid]),'Unknown') AS [DB Name],
    ecp.objtype AS [Object Type],
    t.[text] AS [Adhoc Batch or Object Call],
    SUBSTRING(t.[text], (qs.[statement_start_offset]/2) + 1,
        ((CASE qs.[statement_end_offset]
            WHEN -1 THEN DATALENGTH(t.[text]) ELSE qs.[statement_end_offset] END
            - qs.[statement_start_offset])/2) + 1) AS [Executed Statement]
    , qs.[execution_count] AS [Counts]
    , qs.[total_worker_time] AS [Total Worker Time], (qs.[total_worker_time] / qs.[execution_count]) AS [Avg Worker Time]
    , qs.[total_physical_reads] AS [Total Physical Reads], (qs.[total_physical_reads] / qs.[execution_count]) AS [Avg Physical Reads]
    , qs.[total_logical_writes] AS [Total Logical Writes], (qs.[total_logical_writes] / qs.[execution_count]) AS [Avg Logical Writes]
    , qs.[total_logical_reads] AS [Total Logical Reads], (qs.[total_logical_reads] / qs.[execution_count]) AS [Avg Logical Reads]
    , qs.[total_clr_time] AS [Total CLR Time], (qs.[total_clr_time] / qs.[execution_count]) AS [Avg CLR Time]
    , qs.[total_elapsed_time] AS [Total Elapsed Time], (qs.[total_elapsed_time] / qs.[execution_count]) AS [Avg Elapsed Time]
    , qs.[last_execution_time] AS [Last Exec Time], qs.[creation_time] AS [Creation Time]
FROM sys.dm_exec_query_stats AS qs
    JOIN sys.dm_exec_cached_plans ecp ON qs.plan_handle = ecp.plan_handle
    CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) AS t
--    ORDER BY [Total Worker Time] DESC
--    ORDER BY [Total Physical Reads] DESC
--    ORDER BY [Total Logical Writes] DESC
--    ORDER BY [Total Logical Reads] DESC
--    ORDER BY [Total CLR Time] DESC
--    ORDER BY [Total Elapsed Time] DESC
ORDER BY [Counts] DESC

posted by LifeisSimple
2011. 3. 2. 12:18 Brain Trainning/DataBase
http://internalsviewer.codeplex.com/


Project Description

Internals Viewer is a tool for looking into the SQL Server storage engine and seeing how data is physically allocated, organised and stored.

All sorts of tasks performed by a DBA or developer can benefit greatly from knowledge of what the storage engine is doing and how it works.

User Guide

Troubleshooting

Previous Versions (Including standalone version)

Features
  • Integration with SSMS (SQL Server Management Studio) 2005 and 2008
    • The application is installed as a SSMS add-in
    • Internals information integrated into the Object Explorer
    • Transaction Log viewer integrated into the Query Results
  • Allocation Map
    • Displays the physical layout of tables and indexes
    • Displays PFS status
    • Overlay pages in the Buffer Pool
  • Page Viewer
    • Displays Data pages including forwarding records and sparse columns
    • Displays Index pages
    • Displays allocation pages (IAM, GAM, SGAM, DCM, and BCM pages)
    • Displays pages with SQL Server 2008 row and page compression


posted by LifeisSimple
2011. 3. 2. 12:05 Brain Trainning/DataBase
Source: http://msdn.microsoft.com/ko-kr/library/cc280449.aspx

Creating Compressed Tables and Indexes

SQL Server 2008 R2

SQL Server 2008 supports both row and page compression for tables and indexes. Data compression can be configured for the following database objects:

  • A whole table that is stored as a heap

  • A whole table that is stored as a clustered index

  • A whole nonclustered index

  • A whole indexed view

  • For partitioned tables and indexes, the compression option can be configured per partition, and the various partitions of an object do not have to share the same compression setting.

The compression setting of a table is not automatically applied to its nonclustered indexes; each index must be set individually. Compression is not available for system tables. Tables and indexes can be compressed when they are created by using the CREATE TABLE and CREATE INDEX statements. To change the compression state of a table, index, or partition, use the ALTER TABLE or ALTER INDEX statement.

Note

If the existing data is fragmented, you may be able to reduce the size of the index by rebuilding it without using compression. Because the fill factor of an index is applied during a rebuild, this could also increase the size of the index. For more information, see Fill Factor.

The following considerations apply when you use row and page compression:

  • Compression is available only in the SQL Server 2008 Enterprise and Developer editions.

  • Compression can allow more rows to be stored on a page, but it does not change the maximum row size of a table or index.

  • A table cannot be enabled for compression when the maximum row size plus the compression overhead exceeds the maximum row size of 8060 bytes. For example, a table that has the columns c1 char(8000) and c2 char(53) cannot be compressed because of the additional compression overhead. When the vardecimal storage format is used, the row-size check is performed when the format is enabled. For row and page compression, the row-size check is performed when the object is initially compressed, and then checked as each row is inserted or modified. Compression enforces the following two rules:

    • An update to a fixed-length type must always succeed.

    • Disabling data compression must always succeed. Even if the compressed row fits on the page (that is, it is less than 8060 bytes), SQL Server prevents updates that would not fit on the row when it is uncompressed.

  • When a list of partitions is specified, the compression type can be set to ROW, PAGE, or NONE for individual partitions. If no list of partitions is specified, all partitions are set with the data compression property that is specified in the statement. When a table or index is created, data compression is set to NONE unless otherwise specified. When a table is modified, the existing compression is preserved unless otherwise specified.

  • If you specify a list of partitions that is out of range, an error is generated.

  • Nonclustered indexes do not inherit the compression property of the table. To compress an index, you must explicitly set its compression property. By default, the compression setting of an index is set to NONE when the index is created.

  • When a clustered index is created on a heap, the clustered index inherits the compression state of the heap unless a different compression state is specified.

  • When a heap is configured for page-level compression, pages receive page-level compression only in the following ways:

    • Data is inserted with BULK INSERT syntax.

    • Data is inserted with INSERT INTO ... WITH (TABLOCK) syntax.

    • The table is rebuilt by executing an ALTER TABLE ... REBUILD statement with the PAGE compression option.

  • New pages allocated in a heap as part of DML operations do not use PAGE compression until the heap is rebuilt. Rebuild the heap by removing and reapplying compression, or by creating and removing a clustered index.

  • If you change the compression setting of a heap, all nonclustered indexes on the table must be rebuilt so that they have pointers to the new row locations in the heap.

  • You can enable or disable ROW or PAGE compression online or offline. Enabling compression on a heap is single threaded for an online operation.

  • The disk space requirements for enabling or disabling row or page compression are the same as for creating or rebuilding an index. For partitioned data, you can reduce the space that is required by enabling or disabling compression for one partition at a time.

  • To determine the compression state of partitions in a partitioned table, query the data_compression column of the sys.partitions catalog view.

  • When you compress an index, leaf-level pages can be compressed with both row and page compression. Non-leaf-level pages do not receive page compression.

  • Because of their size, large-value data types are sometimes stored separately from the normal row data on special-purpose pages. Data compression is not available for data that is stored separately.

  • Tables that implemented the vardecimal storage format in SQL Server 2005 retain that setting when they are upgraded. You can apply row compression to a table that uses the vardecimal storage format; however, because row compression is a superset of the vardecimal storage format, there is no reason to retain it. Decimal values gain no additional compression when you combine the vardecimal storage format with row compression. You can apply page compression to a table that uses the vardecimal storage format, but the vardecimal columns are unlikely to be compressed any further.

    Note

    SQL Server 2008 supports the vardecimal storage format; however, because row-level compression achieves the same result, the vardecimal storage format is deprecated. This feature will be removed in a future version of Microsoft SQL Server. Avoid using this feature in new development work, and plan to modify applications that currently use it.

For a high-level description of how data compression is implemented, see Row Compression Implementation, Page Compression Implementation, and Unicode Compression Implementation.

To evaluate how changing the compression state will affect a table or an index, use the sp_estimate_data_compression_savings stored procedure. This stored procedure is available only in the editions of SQL Server that support data compression.
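For example, a minimal sketch of such an estimate (dbo.T1, created in Example 1 later in this topic, stands in for any user table):

EXEC sp_estimate_data_compression_savings
    @schema_name = 'dbo',
    @object_name = 'T1',
    @index_id = NULL,
    @partition_number = NULL,
    @data_compression = 'PAGE';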

The following considerations apply when you use data compression with partitioned tables and indexes:

  • Splitting a range

    When partitions are split by using the ALTER PARTITION statement, both partitions inherit the data compression attribute of the original partition.

  • Merging a range

    When two partitions are merged, the resulting partition inherits the data compression attribute of the destination partition.

  • Switching a partition

    To switch a partition, the data compression property of the partition must match the compression property of the table.

  • Rebuilding one partition or all partitions

    Two syntax variations can be used to modify the compression of a partitioned table or index:

    • The following syntax rebuilds only the referenced partition:

      ALTER TABLE <table_name> 
      REBUILD PARTITION = 1 WITH (DATA_COMPRESSION =  <option>)
      
    • The following syntax rebuilds the whole table by using the existing compression setting for any partitions that are not referenced:

      ALTER TABLE <table_name> 
      REBUILD PARTITION = ALL 
      WITH (DATA_COMPRESSION = PAGE ON PARTITIONS(<range>),
      ... )
      

    Partitioned indexes follow the same principle using ALTER INDEX.

  • Dropping a partitioned clustered index

    When a clustered index is dropped, the corresponding heap partitions retain their data compression setting unless the partitioning scheme is modified. If the partitioning scheme is changed, all partitions are rebuilt to an uncompressed state. To drop a clustered index and change the partitioning scheme, the following steps are required:

    1. Drop the clustered index.

    2. Modify the table by using an ALTER TABLE ... REBUILD ... option that specifies the compression option.

    Dropping a clustered index OFFLINE is a very fast operation, because only the upper levels of the clustered index are removed. When a clustered index is dropped ONLINE, SQL Server must rebuild the heap two times, once for step 1 and once for step 2.

The following considerations apply when you use data compression with replication:

  • When the Snapshot Agent generates the initial schema script, the new schema uses the same compression settings for both the table and its indexes. Compression cannot be enabled on the table alone without its indexes.

  • For transactional replication, the article schema option determines which dependent objects and properties have to be scripted. For more information, see sp_addarticle.

    The Distribution Agent does not check for down-level Subscribers when it applies scripts. If the replication of compression is selected, creating the table on down-level Subscribers will fail. In the case of a mixed topology, do not enable the replication of compression.

  • For merge replication, the publication compatibility level overrides the schema options and determines which schema objects are scripted. For more information about compatibility levels, see Using Multiple Versions of SQL Server in a Replication Topology.

    In a mixed topology, if it is not required to support the new compression options, set the publication compatibility level to the down-level Subscriber version. If the new compression options are required, compress the tables on the Subscriber after they have been created.

The following table shows the replication settings that control compression during replication.

User intent | Replicate partition scheme for table or index | Replicate compression settings | Scripting behavior
Replicate the partition scheme and enable compression on the Subscriber on the partition | True | True | Scripts both the partition scheme and the compression settings.
Replicate the partition scheme but do not compress the data on the Subscriber | True | False | Scripts the partition scheme but not the compression settings for the partitions.
Do not replicate the partition scheme and do not compress the data on the Subscriber | False | False | Does not script the partitions or the compression settings.
Compress the table on the Subscriber if all the partitions are compressed on the Publisher, but do not replicate the partition scheme | False | True | Checks whether all the partitions are enabled for compression. Scripts compression at the table level.

Because compression occurs in the storage engine, data is presented to most of the other components of SQL Server in an uncompressed state. This limits the effects of compression on the other components to the following:

  • Bulk import and export operations

    When data is exported, even in native format, it is output in the uncompressed row format. This can cause the exported data file to be significantly larger than the source data.

    When data is imported, if the target table has been enabled for compression, the storage engine converts the data into the compressed row format. This can cause increased CPU usage compared with importing data into an uncompressed table.

    When data is bulk imported into a heap with page compression, the bulk import operation tries to compress the data with page compression when it inserts the data.

  • Compression does not affect backup and restore.

  • Compression does not affect log shipping.

  • Enabling compression can cause query plans to change, because the data is stored with a different number of pages and a different number of rows per page.

  • Data compression is supported by the Data Compression Wizard in SQL Server Management Studio.

To start the Data Compression Wizard:

  • In Object Explorer, right-click a table, index, or indexed view, point to Storage, and then click Compression.

To monitor compression for the whole instance of SQL Server, use the Page compression attempts/sec and Pages compressed/sec counters of the SQL Server, Access Methods object.

To obtain page compression statistics for individual partitions, query the sys.dm_db_index_operational_stats dynamic management function.
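A minimal sketch of such a query (the page-compression counters are tracked per index and partition):

SELECT OBJECT_NAME(ios.object_id) AS table_name,
    ios.index_id,
    ios.partition_number,
    ios.page_compression_attempt_count,
    ios.page_compression_success_count
FROM sys.dm_db_index_operational_stats (DB_ID(), NULL, NULL, NULL) AS ios
WHERE ios.page_compression_attempt_count > 0;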

Some of the following examples use partitioned tables and require a database that has filegroups. To create a database that has filegroups, execute the following statement.

CREATE DATABASE TestDatabase
ON  PRIMARY
( NAME = TestDatabase,
    FILENAME = 'C:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Data\TestDB.mdf'),
FILEGROUP test1fg
( NAME = TestDBFile1,
    FILENAME = 'C:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Data\TestDBFile1.mdf'),
FILEGROUP test2fg
( NAME = TestDBFile2,
    FILENAME = 'C:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Data\TestDBFile2.ndf'),
FILEGROUP test3fg
( NAME = TestDBFile3,
    FILENAME = 'C:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Data\TestDBFile3.ndf'),
FILEGROUP test4fg
( NAME = TestDBFile4,
    FILENAME = 'C:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Data\TestDBFile4.ndf') ;
GO

To switch to the new database:

USE TestDatabase
GO

1. Creating a table that uses row compression

The following example creates a table and sets the compression to ROW.

CREATE TABLE T1 
(c1 int, c2 nvarchar(50) )
WITH (DATA_COMPRESSION = ROW);
GO

2. Creating a table that uses page compression

The following example creates a table and sets the compression to PAGE.

CREATE TABLE T2 
(c1 int, c2 nvarchar(50) )
WITH (DATA_COMPRESSION = PAGE);
GO

3. Setting the DATA_COMPRESSION option on a partitioned table

The following example uses the TestDatabase database created with the code earlier in this section. The example creates a partition function and a partition scheme, then creates a partitioned table and specifies compression options for its partitions. In this example, partition 1 is configured to use ROW compression and the remaining partitions are configured to use PAGE compression.

To create a partition function:

CREATE PARTITION FUNCTION myRangePF1 (int)
AS RANGE LEFT FOR VALUES (1, 100, 1000) ;
GO

To create a partition scheme:

CREATE PARTITION SCHEME myRangePS1
AS PARTITION myRangePF1
TO (test1fg, test2fg, test3fg, test4fg) ;
GO

To create a partitioned table that has compressed partitions:

CREATE TABLE PartitionTable1 
(col1 int, col2 varchar(max))
ON myRangePS1 (col1) 
WITH 
(
  DATA_COMPRESSION = ROW ON PARTITIONS (1),
  DATA_COMPRESSION = PAGE ON PARTITIONS (2 TO 4)
);
GO

4. Setting the DATA_COMPRESSION option on noncontiguous partitions of a partitioned table

The following example uses the database from Example 3 and creates a table by using the syntax for noncontiguous partitions.

CREATE TABLE PartitionTable2 
(col1 int, col2 varchar(max))
ON myRangePS1 (col1) 
WITH 
(
  DATA_COMPRESSION = ROW ON PARTITIONS (1,3),
  DATA_COMPRESSION = NONE ON PARTITIONS (2,4)
);
GO

5. Modifying a table to change its compression

The following example changes the compression of the nonpartitioned table created in Example 1.

ALTER TABLE T1 
REBUILD WITH (DATA_COMPRESSION = PAGE);
GO

6. Modifying the compression of one partition in a partitioned table

The following example changes the compression of the partitioned table created in Example 3. The REBUILD PARTITION = 1 syntax causes only partition number 1 to be rebuilt.

ALTER TABLE PartitionTable1 
REBUILD PARTITION = 1 WITH (DATA_COMPRESSION = NONE) ;
GO

The following alternative syntax for the same operation causes all partitions in the table to be rebuilt:

ALTER TABLE PartitionTable1 
REBUILD PARTITION = ALL 
WITH (DATA_COMPRESSION = PAGE ON PARTITIONS(1) ) ;
GO

7. Modifying the compression of several partitions in a partitioned table

The REBUILD PARTITION = ... syntax can rebuild only one partition. To rebuild more than one partition, you must execute multiple statements, or execute the following example to rebuild all partitions, using the current compression settings for any unspecified partitions.

ALTER TABLE PartitionTable1 
REBUILD PARTITION = ALL 
WITH
(
DATA_COMPRESSION = PAGE ON PARTITIONS(1), 
DATA_COMPRESSION = ROW ON PARTITIONS(2 TO 4) 
) ;
GO

8. Modifying the compression of an index

The following example uses the table created in Example 1 and creates an index on column C2.

CREATE NONCLUSTERED INDEX IX_INDEX_1 
    ON T1 (C2) 
WITH ( DATA_COMPRESSION = ROW ) ; 
GO

Execute the following code to change the index to page compression:

ALTER INDEX IX_INDEX_1 
ON T1
REBUILD WITH ( DATA_COMPRESSION = PAGE ) ;
GO

9. Modifying the compression of a single partition in a partitioned index

The following example creates an index on a partitioned table, using row compression on all partitions of the index.

CREATE CLUSTERED INDEX IX_PartTab2Col1
ON PartitionTable1 (Col1)
WITH ( DATA_COMPRESSION = ROW ) ;
GO

To create an index whose partitions use different compression settings, use the ON PARTITIONS syntax. The following example creates an index on a partitioned table, using row compression on partition 1 of the index and page compression on partitions 2 through 4 of the index.

CREATE CLUSTERED INDEX IX_PartTab2Col1
ON PartitionTable1 (Col1)
WITH (DATA_COMPRESSION = ROW ON PARTITIONS(1),
    DATA_COMPRESSION = PAGE ON PARTITIONS (2 TO 4 ) ) ;
GO

The following example changes the compression of the partitioned index:

ALTER INDEX IX_PartTab2Col1 ON PartitionTable1
REBUILD PARTITION = ALL 
WITH ( DATA_COMPRESSION = PAGE ON PARTITIONS(1) ) ;
GO

10. Modifying the compression of several partitions in a partitioned index

The REBUILD PARTITION = ... syntax can rebuild only one partition. To rebuild more than one partition, you must execute multiple statements, or execute the following example to rebuild all partitions, using the current compression settings for any unspecified partitions.

ALTER INDEX IX_PartTab2Col1 ON PartitionTable1
REBUILD PARTITION = ALL 
WITH
(
DATA_COMPRESSION = PAGE ON PARTITIONS(1), 
DATA_COMPRESSION = ROW ON PARTITIONS(2 TO 4) 
) ;
GO
posted by LifeisSimple
2011. 2. 28. 13:41 Brain Trainning/DataBase

January 27, 2011

Re-generating SQL Server Logins

By Claire Hsu

Microsoft SQL Server stores all login permissions in security catalog system tables. By querying the system tables, database administrators can produce a comprehensive report of login permissions, including the original (hashed) password and the server and database roles assigned, down to granular object-level permissions.

What's the deal with logins, and how do they affect a database system?

It is a common task for a DBA to make sure user login permissions are properly assigned. Depending on the purpose of the logins, we grant an appropriate level of permission to ensure that the login can perform the tasks it needs to do and nothing more.

We replicate back-ends into multiple environments quite often. Each of them serves a different purpose, like development, testing, user acceptance testing, pilot and production environments. The very same login may have different levels of permission in each environment. Permissions are tightened up gradually from development to QA, then from QA to UAT, into Pilot and eventually completely secured in Production.

Other than maintaining multiple environments with completely different privilege settings, DBAs also often face tasks like database/server migration, or breaking down one server into multiple environments that require exactly the same permission settings on all servers. The last thing a DBA wants is users from different systems calling in and complaining that their logins can no longer perform tasks that worked before the segregation.

Last but certainly not least is compliance auditing. More and more companies have come under extensive scrutiny over the level of permission a login possesses in production systems. Auditors usually want to know which logins have elevated privileges and the justification for them. So it comes in handy if a DBA can quickly find the permission setup for all of the logins on monitored servers and do some clean-up ahead of reviewing login permissions with auditors.

In some cases, logins are required to have privileges downgraded or removed from the production systems. Reverse engineering the login comes in handy for analyzing how its permissions were configured at the server level, database level and object level before proceeding with any permission changes or login removal. With a reversing script at hand, we can easily revert to the original settings in case the login changes impact normal application or user operations.

MS SQL Server stores all login permissions in security catalog system tables. By querying the system tables, we can re-generate a SQL statement that covers, from the very beginning, how a login is created along with its original password, its server and database role assignments, and its granular object-level permissions. We can also produce a comprehensive login permission report by combining information from the system metadata.

Login properties

We will start by creating a login along with its properties. There are four types of logins within MS SQL Server: SQL Server logins, Windows logins, certificate-mapped logins, and asymmetric key-mapped logins. Here, we will focus on SQL Server logins and Windows logins since they are the most commonly used. Password generation will be covered in the script, but the DROP LOGIN statement will not; this is to avoid any accidental login drop on the production system. We will utilize sp_hexadecimal from Microsoft support and extract some code from sp_help_revlogin to generate the hashed password and sid.

Starting with SQL 2005, you can enable a password policy, password expiration, and forcing the user to change the password at the next login for a SQL Server login, much like the password policy defined for our Windows logins. The password check policy is on by default, whereas the password expiration policy is off by default. These two policies are only enforced on Windows 2003 or later. If you want the user to change the password at the next login, then both policies are automatically set to ON. Since these policies apply only to SQL Server logins, you will notice that for Windows logins none of these properties are included in the generated statement.
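For illustration, these options map onto the CHECK_POLICY, CHECK_EXPIRATION and MUST_CHANGE clauses of CREATE LOGIN; here is a minimal sketch, using a hypothetical login name and password:

-- Hypothetical login, for illustration only
CREATE LOGIN [policy_demo] WITH PASSWORD = 'Str0ng!Pass1' MUST_CHANGE,
    CHECK_POLICY = ON,      -- on by default for SQL Server logins
    CHECK_EXPIRATION = ON   -- MUST_CHANGE requires both options to be ON
GO
-- Verify how the flags were recorded
SELECT name, is_policy_checked, is_expiration_checked
FROM sys.sql_logins WHERE name = 'policy_demo'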

Pre-requisite utility stored procedure

Before we look into how to generate a CREATE statement for both SQL Server logins and Windows logins, you need to create the stored procedure sp_hexadecimal as a pre-requisite. The stored procedure sp_hexadecimal works on SQL 2005, 2008 and 2008 R2.

---------------------------------------
-- Stored Procedure sp_hexadecimal
---------------------------------------
 
USE master
GO
IF OBJECT_ID ('sp_hexadecimal') IS NOT NULL
    DROP PROCEDURE sp_hexadecimal
GO
CREATE PROCEDURE sp_hexadecimal
    @binvalue varbinary(256),
    @hexvalue varchar (514) OUTPUT
AS
    DECLARE @charvalue varchar (514)
    DECLARE @i int
    DECLARE @length int
    DECLARE @hexstring char(16)
    SELECT @charvalue = '0x'
    SELECT @i = 1
    SELECT @length = DATALENGTH (@binvalue)
    SELECT @hexstring = '0123456789ABCDEF'
    WHILE (@i <= @length)
    BEGIN
        DECLARE @tempint int
        DECLARE @firstint int
        DECLARE @secondint int
        SELECT @tempint = CONVERT(int, SUBSTRING(@binvalue,@i,1))
        SELECT @firstint = FLOOR(@tempint/16)
        SELECT @secondint = @tempint - (@firstint*16)
        SELECT @charvalue = @charvalue +
        SUBSTRING(@hexstring, @firstint+1, 1) +
        SUBSTRING(@hexstring, @secondint+1, 1)
        SELECT @i = @i + 1
    END
 
SELECT @hexvalue = @charvalue
GO
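As a quick sanity check (the input value below is arbitrary), the procedure should echo the same bytes back as a hex string:

DECLARE @hex varchar (514)
EXEC sp_hexadecimal 0x0102030405, @hex OUT
PRINT @hex   -- prints 0x0102030405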

Reverse engineering Login creation

Now we can proceed with generating a CREATE LOGIN statement for both SQL Server logins and Windows logins. Replace 'YourLoginName' with the login you would like to generate. This script works on SQL 2005, SQL 2008 and SQL 2008 R2. Assuming you have adequate permission on the instance to run all subsequent scripts, execute the following code.

----------------------------------------------
--Login Pre-requisites 
----------------------------------------------
 
USE master
go
SET NOCOUNT ON 
DECLARE @login_name varchar(100)
SET @login_name = 'YourLoginName'
 
IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = @login_name AND type IN ('G','U','S'))
BEGIN 
          PRINT 'Please input valid login name'
          RETURN
END
 
DECLARE @login_sid varbinary(85)
SELECT @login_sid = sid FROM sys.server_principals WHERE name = @login_name
 
DECLARE @maxid int
IF OBJECT_ID('tempdb..#db_users') is not null
DROP TABLE #db_users 
SELECT id = identity(int,1,1), sql_cmd = 'SELECT '''+name+''', 
	* FROM ['+name+'].sys.database_principals' INTO #db_users FROM sys.sysdatabases
 
SELECT @maxid = @@ROWCOUNT
 
 
---------------------------------------------
--Retrieve hashed password and hashed sid 
---------------------------------------------
IF EXISTS (SELECT * FROM sys.server_principals WHERE type = 'S' and name = @login_name )
BEGIN 
          DECLARE @PWD_varbinary  varbinary (256)
          SET @PWD_varbinary = CAST( LOGINPROPERTY( @login_name, 'PasswordHash' ) AS varbinary (256) )
 
          DECLARE @SID_string varchar (514)
          DECLARE @PWD_string  varchar (514)
 
          EXEC sp_hexadecimal @PWD_varbinary, @PWD_string OUT
          EXEC sp_hexadecimal @login_sid,     @SID_string OUT
END
--select @SID_string
--select @PWD_string
 
----------------------------------------------
--Login Properties
----------------------------------------------
PRINT '----------------------------------------------'
PRINT '--SET Login Properties'
PRINT '----------------------------------------------'
 
DECLARE @login_sqlcmd varchar(1000)
SET @login_sqlcmd = ''
SELECT @login_sqlcmd = '-- LOGIN ['+@login_name+'] IS '+case is_disabled WHEN 1 THEN 'DISABLED' ELSE 'ENABLED' 
	END FROM  sys.server_principals WHERE name = @login_name
 
 
IF EXISTS (SELECT * FROM sys.sql_logins WHERE name = @login_name)
BEGIN 
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'CREATE LOGIN '+ QUOTENAME(@login_name)+' 
				WITH PASSWORD = ' + @PWD_string + ' HASHED, SID = ' + @SID_string + ', 
				DEFAULT_DATABASE = ['+default_database_name+'], DEFAULT_LANGUAGE = ['+default_language_name+']' 
				FROM sys.server_principals WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd + ', CHECK_POLICY' + CASE is_policy_checked 
				WHEN 0 THEN '=OFF' ELSE '=ON' END FROM sys.sql_logins WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd + ', CHECK_EXPIRATION' + CASE is_expiration_checked 
				WHEN 0 THEN '=OFF' ELSE '=ON' END FROM sys.sql_logins WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'ALTER LOGIN ['+@login_name+'] 
				WITH DEFAULT_DATABASE = ['+default_database_name+'], 
				DEFAULT_LANGUAGE = ['+default_language_name+']' FROM sys.server_principals WHERE name = @login_name
END
ELSE
BEGIN 
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'CREATE LOGIN ' + QUOTENAME( @login_name ) + ' 
				FROM WINDOWS WITH DEFAULT_DATABASE = [' + default_database_name + ']' 
				FROM sys.server_principals WHERE name = @login_name
END
 
PRINT @login_sqlcmd 

By executing the above script, you will see a result similar to the snapshot below in the SSMS result pane. [Refer Fig 1.0 and Fig 1.1]

 
Fig 1.0

For a Windows login, the result looks like this. There will be no hashed password, sid or password policy defined for a Windows login.

 
Fig 1.1

Server level role permission for a login

Now we have the script to recover the login creation, but a login without any permissions doesn't serve any purpose. So next we will see how to recover server-level permissions for a login by running the statement below in SSMS.

----------------------------------------------
--Login Pre-requisites 
----------------------------------------------
 
USE master
go
SET NOCOUNT ON 
DECLARE @login_name varchar(100)
SET @login_name = 'YourLoginName'
 
IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = @login_name AND type IN ('G','U','S'))
BEGIN 
          PRINT 'Please input valid login name'
          RETURN
END
 
DECLARE @login_sid varbinary(85)
SELECT @login_sid = sid FROM sys.server_principals WHERE name = @login_name
 
DECLARE @maxid int
IF OBJECT_ID('tempdb..#db_users') is not null
DROP TABLE #db_users 
SELECT id = identity(int,1,1), sql_cmd = 'SELECT '''+name+''', * FROM ['+name+'].sys.database_principals' INTO #db_users FROM sys.sysdatabases 
SELECT @maxid = @@ROWCOUNT
 
 
----------------------------------------------
--Grant Server Role to login 
----------------------------------------------
PRINT ''
PRINT '----------------------------------------------'
PRINT '--Grant Server Role to login '
PRINT '----------------------------------------------'
 
IF OBJECT_ID('tempdb..#srvrole') IS NOT NULL
DROP TABLE #srvrole
 
CREATE TABLE #srvrole(ServerRole sysname, MemberName sysname, MemberSID varbinary(85))  
INSERT INTO [#srvrole] EXEC sp_helpsrvrolemember 
 
DECLARE @login_srvrole varchar(1000)
SET @login_srvrole = ''
IF EXISTS (SELECT 1 FROM #srvrole WHERE[MemberName] = @login_name) 
BEGIN 
          SELECT @login_srvrole = @login_srvrole + 'EXEC sp_addsrvrolemember '''+MemberName+''',
				'''+ServerRole+'''' FROM #srvrole 
          WHERE [MemberName] = @login_name
          PRINT @login_srvrole 
END
ELSE
BEGIN 
          PRINT '--Login ['+@login_name+'] is not a member of any server level role'
END

You will see a result similar to the snapshot below in the result pane. [Refer Fig 1.2]

 
Fig 1.2

If the login you inquired about doesn't have any server-level permission, then the result pane will display as below. [Refer Fig 1.3]

 
Fig 1.3

Database level role permission for a login

Now, a login may also be a member of specific database roles. It's important that we don't miss restoring the database role permission settings for a login.

----------------------------------------------
--Login Pre-requisites 
----------------------------------------------
 
USE master
go
SET NOCOUNT ON 
DECLARE @login_name varchar(100)
SET @login_name = 'YourLoginName'
 
IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = @login_name AND type IN ('G','U','S'))
BEGIN 
          PRINT 'Please input valid login name'
          RETURN
END
 
DECLARE @login_sid varbinary(85)
SELECT @login_sid = sid FROM sys.server_principals WHERE name = @login_name
 
DECLARE @maxid int
IF OBJECT_ID('tempdb..#db_users') is not null
DROP TABLE #db_users 
SELECT id = identity(int,1,1), sql_cmd = 'SELECT '''+name+''', * FROM ['+name+'].sys.database_principals' INTO #db_users FROM sys.sysdatabases 
SELECT @maxid = @@ROWCOUNT
 
---------------------------------------------------
--Find out list of db that the login has access to 
---------------------------------------------------
 
IF OBJECT_ID('tempdb..#alldb_users') is not null
DROP TABLE #alldb_users 
 
CREATE TABLE #alldb_users(
          [dbname] [sysname] NOT NULL,
          [name] [sysname] NOT NULL,
          [principal_id] [int] NOT NULL,
          [type] [char](1) NOT NULL,
          [type_desc] [nvarchar](60) NULL,
          [default_schema_name] [sysname] NULL,
          [create_date] [datetime] NOT NULL,
          [modify_date] [datetime] NOT NULL,
          [owning_principal_id] [int] NULL,
          [sid] [varbinary](85) NULL,
          [is_fixed_role] [bit] NOT NULL
) 
 
DECLARE @id int, @sqlcmd varchar(500)
SET @id = 1 
WHILE @id <=@maxid
BEGIN 
          SELECT @sqlcmd = sql_cmd FROM #db_users WHERE id = @id 
          INSERT INTO #alldb_users EXEC (@sqlcmd)
          SET @id = @id + 1 
END
 
DELETE FROM #alldb_users WHERE sid is null 
DELETE FROM #alldb_users WHERE sid <> @login_sid
 
--SELECT * FROM #alldb_users
----------------------------------------------
--granting database role to login 
----------------------------------------------
PRINT ''
PRINT '----------------------------------------------'
PRINT '--Grant database role to login '
PRINT '----------------------------------------------'
 
 
IF OBJECT_ID('tempdb..#dbrole') is not null
DROP TABLE #dbrole
 
create table #dbrole (dbname varchar(100), dbrole varchar (100), dbrole_member varchar(100), 
	sid varbinary(85), default_schema_name varchar(100), login_name varchar(100), db_principal_id int)
DECLARE @dbrole_sqlcmd varchar(max)
SET @dbrole_sqlcmd = ''
SELECT @dbrole_sqlcmd = @dbrole_sqlcmd + 
	'SELECT '''+dbname+''', c.name, b.name, b.sid, b.default_schema_name, d.name, b.principal_id as login_name 
from ['+dbname+'].sys.database_role_members a 
inner join ['+dbname+'].sys.database_principals b on a.member_principal_id = b.principal_id
inner join ['+dbname+'].sys.database_principals c on a.role_principal_id = c.principal_id
left join sys.server_principals d on b.sid = d.sid
'
from #alldb_users 
--SELECT @dbrole_sqlcmd
--PRINT @dbrole_sqlcmd
INSERT INTO #dbrole exec(@dbrole_sqlcmd)
--SELECT * FROM #dbrole
 
DELETE FROM #dbrole WHERE sid <> @login_sid
 
ALTER TABLE #dbrole ADD ID INT identity(1,1)
 
DECLARE @counter int, @maxid2 int, @login_dbrole varchar(max) 
SELECT @maxid2 = MAX(ID) FROM #dbrole
SET @counter = 1
 
--SELECT * FROM #dbrole 
 
IF NOT EXISTS (SELECT * FROM #dbrole )
BEGIN 
          PRINT '--Login ['+@login_name+'] is not a member of any database level role'
          return 
END
 
WHILE @counter <= @maxid2
BEGIN 
SELECT @login_dbrole  = 'USE ['+dbname+']
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = '''+dbrole_member+''')
BEGIN 
          CREATE USER ['+dbrole_member+'] 
				FOR LOGIN ['+login_name+']'+isnull(' WITH DEFAULT_SCHEMA=['+default_schema_name+']','')+'
END
ALTER USER ['+dbrole_member+'] WITH LOGIN = ['+login_name+']
EXEC sp_addrolemember '''+dbrole+''','''+dbrole_member+'''
 
' FROM #dbrole WHERE ID = @counter
          SELECT @counter = @counter + 1 
          PRINT @login_dbrole 
END 
 
 

If the login does not belong to any database role, then a result similar to the below snapshot will show up in the result pane. [Refer Fig 1.4 and Fig 1.5]

 
Fig 1.4

Otherwise, here is the sample result that denotes what database role the login is a member of.

 
Fig 1.5

We need to make sure that a database user is created for the login we are trying to recover in each database. If the login already has a database user defined for it, then we just need to make sure that the user is re-mapped to match the login's SID. sp_change_users_login will be removed in a future version of Microsoft SQL Server, so it is good to start avoiding it and to use ALTER USER ... WITH LOGIN instead to perform the SID remap.
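For example (the user and login names here are hypothetical), the old and new ways to re-map an orphaned user look like this:

-- Deprecated approach:
-- EXEC sp_change_users_login 'Update_One', 'app_user', 'app_login'

-- Preferred approach: re-map the database user's SID to the login
USE [YourDatabase]
GO
ALTER USER [app_user] WITH LOGIN = [app_login]
GO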

Consolidating the above work

By combining all three scripts, you can generate a script that does the following:

1.    Creates the login with the original password, original sid and default database setting, along with the password policy settings

2.    Grants the server-level role permissions

3.    Grants the database-level role permissions

The script below demonstrates how to combine all three scripts into one. It is assumed that you have already created the stored procedure sp_hexadecimal by running the previous scripts.

----------------------------------------------
--Login Pre-requisites 
----------------------------------------------
 
USE master
go
SET NOCOUNT ON 
DECLARE @login_name varchar(100)
SET @login_name = 'YourLoginName'
 
IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = @login_name AND type IN ('G','U','S'))
BEGIN 
          PRINT 'Please input valid login name'
          RETURN
END
 
DECLARE @login_sid varbinary(85)
SELECT @login_sid = sid FROM sys.server_principals WHERE name = @login_name
 
DECLARE @maxid int
IF OBJECT_ID('tempdb..#db_users') is not null
DROP TABLE #db_users 
SELECT id = identity(int,1,1), sql_cmd = 'SELECT '''+name+''', * FROM ['+name+'].sys.database_principals' INTO #db_users FROM sys.sysdatabases 
SELECT @maxid = @@ROWCOUNT
 
 
 
---------------------------------------------
--Retrieve hashed password and hashed sid 
---------------------------------------------
IF EXISTS (SELECT * FROM sys.server_principals WHERE type = 'S' and name = @login_name )
BEGIN 
          DECLARE @PWD_varbinary  varbinary (256)
          SET @PWD_varbinary = CAST( LOGINPROPERTY( @login_name, 'PasswordHash' ) AS varbinary (256) )
 
          DECLARE @SID_string varchar (514)
          DECLARE @PWD_string  varchar (514)
 
          EXEC sp_hexadecimal @PWD_varbinary, @PWD_string OUT
          EXEC sp_hexadecimal @login_sid,     @SID_string OUT
END
--select @SID_string
--select @PWD_string
----------------------------------------------
--Login Properties
----------------------------------------------
PRINT '----------------------------------------------'
PRINT '--SET Login Properties'
PRINT '----------------------------------------------'
 
DECLARE @login_sqlcmd varchar(1000)
SET @login_sqlcmd = ''
SELECT @login_sqlcmd = '-- LOGIN ['+@login_name+'] IS '+case is_disabled 
	WHEN 1 THEN 'DISABLED' ELSE 'ENABLED' END FROM  sys.server_principals WHERE name = @login_name
 
 
IF EXISTS (SELECT * FROM sys.sql_logins WHERE name = @login_name)
BEGIN 
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'CREATE LOGIN '+ QUOTENAME(@login_name)+' 
				WITH PASSWORD = ' + @PWD_string + ' HASHED, SID = ' + @SID_string + ', 
				DEFAULT_DATABASE = ['+default_database_name+']' FROM sys.server_principals WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd + ', CHECK_POLICY' + CASE is_policy_checked 
				WHEN 0 THEN '=OFF' ELSE '=ON' END FROM sys.sql_logins WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd + ', CHECK_EXPIRATION' + CASE is_expiration_checked 
				WHEN 0 THEN '=OFF' ELSE '=ON' END FROM sys.sql_logins WHERE name = @login_name
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'ALTER LOGIN ['+@login_name+'] 
				WITH DEFAULT_DATABASE = ['+default_database_name+']' FROM sys.server_principals WHERE name = @login_name
END
ELSE
BEGIN 
          SELECT @login_sqlcmd = @login_sqlcmd+ char(10)+'CREATE LOGIN ' + QUOTENAME( @login_name ) + 
				' FROM WINDOWS WITH DEFAULT_DATABASE = [' + default_database_name + ']' 
				FROM sys.server_principals WHERE name = @login_name
END
 
PRINT @login_sqlcmd 
 
----------------------------------------------
--Grant Server Role to login 
----------------------------------------------
PRINT ''
PRINT '----------------------------------------------'
PRINT '--Grant Server Role to login '
PRINT '----------------------------------------------'
 
IF OBJECT_ID('tempdb..#srvrole') IS NOT NULL
DROP TABLE #srvrole
 
CREATE TABLE #srvrole(ServerRole sysname, MemberName sysname, MemberSID varbinary(85))  
INSERT INTO [#srvrole] EXEC sp_helpsrvrolemember 
 
DECLARE @login_srvrole varchar(1000)
SET @login_srvrole = ''
IF EXISTS (SELECT 1 FROM #srvrole WHERE[MemberName] = @login_name) 
BEGIN 
          SELECT @login_srvrole = @login_srvrole + 'EXEC sp_addsrvrolemember '''+MemberName+''',
				'''+ServerRole+''''+CHAR(10) FROM #srvrole 
          WHERE [MemberName] = @login_name
          PRINT @login_srvrole 
END
ELSE
BEGIN 
          PRINT 'Login ['+@login_name+'] is not a member of any server level role'
END
 
---------------------------------------------------
--Find out list of db that the login has access to 
---------------------------------------------------
 
IF OBJECT_ID('tempdb..#alldb_users') is not null
DROP TABLE #alldb_users 
 
CREATE TABLE #alldb_users(
          [dbname] [sysname] NOT NULL,
          [name] [sysname] NOT NULL,
          [principal_id] [int] NOT NULL,
          [type] [char](1) NOT NULL,
          [type_desc] [nvarchar](60) NULL,
          [default_schema_name] [sysname] NULL,
          [create_date] [datetime] NOT NULL,
          [modify_date] [datetime] NOT NULL,
          [owning_principal_id] [int] NULL,
          [sid] [varbinary](85) NULL,
          [is_fixed_role] [bit] NOT NULL
) 
 
DECLARE @id int, @sqlcmd varchar(500)
SET @id = 1 
WHILE @id <=@maxid
BEGIN 
          SELECT @sqlcmd = sql_cmd FROM #db_users WHERE id = @id 
          INSERT INTO #alldb_users EXEC (@sqlcmd)
          SET @id = @id + 1 
END
 
DELETE FROM #alldb_users WHERE sid is null 
DELETE FROM #alldb_users WHERE sid <> @login_sid
 
--SELECT * FROM #alldb_users
----------------------------------------------
--granting database role to login 
----------------------------------------------
PRINT ''
PRINT '----------------------------------------------'
PRINT '--Grant database role to login '
PRINT '----------------------------------------------'
 
 
IF OBJECT_ID('tempdb..#dbrole') is not null
DROP TABLE #dbrole
 
create table #dbrole (dbname varchar(100), dbrole varchar (100), dbrole_member varchar(100), 
	sid varbinary(85), default_schema_name varchar(100), login_name varchar(100), db_principal_id int)
DECLARE @dbrole_sqlcmd varchar(max)
SET @dbrole_sqlcmd = ''
SELECT @dbrole_sqlcmd = @dbrole_sqlcmd + 'SELECT '''+dbname+''', c.name, b.name, b.sid, 
	b.default_schema_name, d.name, b.principal_id as login_name 
from ['+dbname+'].sys.database_role_members a 
inner join ['+dbname+'].sys.database_principals b on a.member_principal_id = b.principal_id
inner join ['+dbname+'].sys.database_principals c on a.role_principal_id = c.principal_id
left join sys.server_principals d on b.sid = d.sid
'
from #alldb_users 
--SELECT @dbrole_sqlcmd
--PRINT @dbrole_sqlcmd
INSERT INTO #dbrole exec(@dbrole_sqlcmd)
--SELECT * FROM #dbrole
 
DELETE FROM #dbrole WHERE sid <> @login_sid
 
ALTER TABLE #dbrole ADD ID INT identity(1,1)
 
DECLARE @counter int, @maxid2 int, @login_dbrole varchar(max) 
SELECT @maxid2 = MAX(ID) FROM #dbrole
SET @counter = 1
 
--SELECT * FROM #dbrole 
 
IF NOT EXISTS (SELECT * FROM #dbrole )
BEGIN 
          PRINT '--Login ['+@login_name+'] is not a member of any database level role'
          return 
END
 
WHILE @counter <= @maxid2
BEGIN 
SELECT @login_dbrole  = 'USE ['+dbname+']
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = '''+dbrole_member+''')
BEGIN 
          CREATE USER ['+dbrole_member+'] FOR LOGIN ['+login_name+']'+isnull
				(' WITH DEFAULT_SCHEMA=['+default_schema_name+']','')+'
END
ALTER USER ['+dbrole_member+'] WITH LOGIN = ['+login_name+']
EXEC sp_addrolemember '''+dbrole+''','''+dbrole_member+'''
 
' FROM #dbrole WHERE ID = @counter
          SELECT @counter = @counter + 1 
          PRINT @login_dbrole 
END 

In the next article, I will continue to drill down into the server and database securable permissions for a designated login.


Re-generating SQL Server Logins, Part 2

By Claire Hsu

This is the second article in my Re-generating SQL Server Logins series. In the first column, I focused on server-level role and database-level role assignment for a given login. I also mentioned tasks DBAs frequently encounter related to setting up login properties, and the implications improperly defined login permissions can create. In this portion of the series, I'd like to walk through the list of securables and how to generate the login script to include securable grants and denies.

What is a securable?

Simply put, any entity within the database engine that can be secured with permissions is called a securable. The highest-level securables are servers and databases; mid-level securables are assemblies, full-text catalogs, message types, etc.; and the finest-level securables are tables, views, functions, stored procedures, etc. We can grant, deny and revoke permissions on securables.
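The same three verbs apply at every scope; here is a small sketch with hypothetical principal and object names:

GRANT SELECT ON OBJECT::dbo.Orders TO [app_user]        -- finest level: a table
GRANT EXECUTE ON SCHEMA::Sales TO [app_user]            -- mid level: a schema
DENY VIEW DEFINITION TO [app_user]                      -- database level (current database)
REVOKE SELECT ON OBJECT::dbo.Orders FROM [app_user]     -- removes the first GRANT
GO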

Server-level permission

Below is the list of server-level permissions. A permission in the left column also covers the permission listed to its right. For instance, once we grant a login CONTROL SERVER permission, the login also has the privilege to do bulk operations, create and alter any database, create unsafe assemblies and shut down the SQL instance. We need to be extra careful when granting anyone server-level permission. One thing worth mentioning is that, by default, all users have the "VIEW ANY DATABASE" permission. Also, to grant server-level permission we need to be in the master database to issue the server-scope granting command, and we either need to be a sysadmin or possess the server-level permission (or higher) WITH GRANT OPTION in order to give others permission at the server level. Server-level permission information is stored in sys.server_permissions.

Server Permission              Finer Server Permission
-----------------------------  -------------------------------
CONTROL SERVER                 ADMINISTER BULK OPERATIONS
CONTROL SERVER                 ALTER ANY CONNECTION
CONTROL SERVER                 ALTER ANY CREDENTIAL
CONTROL SERVER                 ALTER ANY DATABASE
CONTROL SERVER                 ALTER ANY ENDPOINT
CONTROL SERVER                 ALTER ANY EVENT NOTIFICATION
CONTROL SERVER                 ALTER ANY LINKED SERVER
CONTROL SERVER                 ALTER ANY LOGIN
CONTROL SERVER                 ALTER ANY SERVER AUDIT
CONTROL SERVER                 ALTER RESOURCES
CONTROL SERVER                 ALTER SERVER STATE
CONTROL SERVER                 ALTER SETTINGS
CONTROL SERVER                 ALTER TRACE
CONTROL SERVER                 AUTHENTICATE SERVER
CONTROL SERVER                 CONNECT SQL
CONTROL SERVER                 CONTROL SERVER
CONTROL SERVER                 EXTERNAL ACCESS ASSEMBLY
CONTROL SERVER                 SHUTDOWN
CONTROL SERVER                 UNSAFE ASSEMBLY
CONTROL SERVER                 VIEW ANY DEFINITION
ALTER ANY EVENT NOTIFICATION   CREATE DDL EVENT NOTIFICATION
ALTER ANY EVENT NOTIFICATION   CREATE TRACE EVENT NOTIFICATION
ALTER ANY ENDPOINT             CREATE ENDPOINT
ALTER SERVER STATE             VIEW SERVER STATE
ALTER ANY DATABASE             CREATE ANY DATABASE
VIEW ANY DEFINITION            VIEW ANY DATABASE
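As a sketch of the mechanics just described (the login name is hypothetical), server-scope permissions are granted from master:

USE master
GO
-- Requires sysadmin, or holding the same permission WITH GRANT OPTION
GRANT VIEW SERVER STATE TO [demo_login]
GRANT ALTER TRACE TO [demo_login] WITH GRANT OPTION  -- demo_login may re-grant it
GO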

Permission on database-level securable

Unlike server-level permissions, which you can only define when connected to the master database, each database has its own list of pre-defined securable classes on which you can grant permissions to database principals (database users). Here is the list of securable classes on which permissions exist:

  • Database
  • Object or column
  • Schema
  • Database principal
  • Assembly
  • Type
  • XML schema collection
  • Message type (Service Broker securable)
  • Service contract (Service Broker securable)
  • Service (Service Broker securable)
  • Remote Service Binding (Service Broker securable)
  • Route (Service Broker securable)
  • Full-Text Catalog
  • Symmetric Key
  • Certificate
  • Asymmetric Key

Database-level permission information is stored in sys.database_permissions. Unlike sys.server_permissions, sys.database_permissions exists in every database.
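To see what is recorded there, a simple query like the following (run in the database of interest) lists each grantee and permission:

SELECT pr.name AS grantee, pe.class_desc, pe.permission_name,
       pe.state_desc    -- GRANT, GRANT_WITH_GRANT_OPTION or DENY
FROM sys.database_permissions pe
INNER JOIN sys.database_principals pr ON pe.grantee_principal_id = pr.principal_id
ORDER BY pr.name, pe.class_desc, pe.permission_name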

Grant, revoke and deny

The definitions of GRANT, REVOKE and DENY can be confusing at times. Simply put, REVOKE is used to remove a permission that has been GRANTed or DENIed on the same securable.

But what if we grant permission on a lower-scope securable and then issue a DENY or REVOKE on a higher-scope securable; what happens to the lower-scope securable? No surprise, there is a difference.

When denying on a higher-level securable, the principal will also be denied on the lower-level securable.

When revoking on a higher-level securable, the principal will keep the granted permission on the lower-scope securable.

A principal can grant a permission to another principal if the permission was granted to him WITH GRANT OPTION.
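A short worked example (hypothetical user and table) makes the difference concrete:

GRANT SELECT ON OBJECT::dbo.Orders TO [app_user]   -- lower-scope grant
DENY SELECT ON SCHEMA::dbo TO [app_user]           -- higher-scope DENY wins: dbo.Orders is now blocked
REVOKE SELECT ON SCHEMA::dbo FROM [app_user]       -- removes only the schema-level DENY;
                                                   -- the table-level GRANT survives, so reads work again
GO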

Generate script for server and database securable class

We have briefly gone over the available server-level and database-level securable classes. We often face tasks like listing out what permissions a login has. In the first article of this series, we talked about server role and database role assignment for a given login. But there are cases where a login was given more specific permissions, not through a server or database role but through server and database securables. This is the most easily forgotten part of login permissions.

It would be nice to simply input the login name we are interested in and get a ready-to-use script that recovers the server-level and database-level securable permissions for the specified principal. In this article, I am focusing only on database and server-level securables. If you are interested in seeing how to generate the login creation, server role assignment or database role assignment, please refer to the first article of the series.

Now, we can look into the script. To use it, follow the steps below. The script works on SQL 2005, SQL 2008 and SQL 2008 R2.

1.    Open Microsoft SQL Server Management Studio (a.k.a. SSMS), make a connection to the SQL instance, then click New Query. Make sure the credential you use to run the code has adequate privileges to generate the script.

2.    Copy the code below into the query window.

3.    Replace 'YourLoginName' with the login for which you would like to re-generate the securable permission script, then execute.

 

----------------------------------------------
--Login Pre-requisites 
----------------------------------------------
set concat_null_yields_null off
USE master
go
SET NOCOUNT ON
DECLARE @login_name varchar(100)
SET @login_name = 'YourLoginName'

-----------------------------------------------------------------

IF lower(@login_name) IN ('sa','public')
BEGIN
          RAISERROR (15405,11,1,@login_name)
          RETURN
END

IF NOT EXISTS (SELECT * FROM sys.server_principals WHERE name = @login_name AND type IN ('G','U','S'))
BEGIN
          PRINT 'Please input valid login name'
          RETURN
END

DECLARE @login_sid varbinary(85)
SELECT @login_sid = sid FROM sys.server_principals WHERE name = @login_name

DECLARE @maxid int

IF OBJECT_ID('tempdb..#db_users') is not null
BEGIN
          DROP TABLE #db_users
END

SELECT id = identity(int,1,1), sql_cmd = 'SELECT '''+name+''', * FROM ['+name+'].sys.database_principals' INTO #db_users FROM sys.databases
WHERE state_desc <> 'OFFLINE'

SELECT @maxid = @@ROWCOUNT

----------------------------------------------
--Create Server Role Temp table
----------------------------------------------

IF OBJECT_ID('tempdb..#srvrole') IS NOT NULL
BEGIN
          DROP TABLE #srvrole
END

CREATE TABLE #srvrole(ServerRole sysname, MemberName sysname, MemberSID varbinary(85)) 
INSERT INTO [#srvrole] EXEC sp_helpsrvrolemember

DECLARE @login_srvrole varchar(1000)
SET @login_srvrole = ''
IF EXISTS (select * from [#srvrole] where ServerRole = 'sysadmin' AND MemberName = @login_name)
BEGIN
          PRINT '--Login ['+@login_name+'] is part of sysadmin server role, hence possesses full privileges for SQL instance: '+@@servername
          PRINT 'GO'
          SELECT @login_srvrole = @login_srvrole + 'EXEC sp_addsrvrolemember '''+MemberName+''','''+ServerRole+''''+CHAR(10) FROM #srvrole
          WHERE [MemberName] = @login_name
          PRINT @login_srvrole
          RETURN
END

---------------------------------------------------
--Find out list of db that the login has access to
---------------------------------------------------

PRINT ''
PRINT '----------------------------------------------'
PRINT '--Create database user for login '
PRINT '----------------------------------------------'

IF OBJECT_ID('tempdb..#alldb_users') is not null
BEGIN
          DROP TABLE #alldb_users
END

CREATE TABLE #alldb_users(
          [dbname] [sysname] NOT NULL,
          [name] [sysname] NOT NULL,
          [principal_id] [int] NOT NULL,
          [type] [char](1) NOT NULL,
          [type_desc] [nvarchar](60) NULL,
          [default_schema_name] [sysname] NULL,
          [create_date] [datetime] NOT NULL,
          [modify_date] [datetime] NOT NULL,
          [owning_principal_id] [int] NULL,
          [sid] [varbinary](85) NULL,
          [is_fixed_role] [bit] NOT NULL
)

DECLARE @id int, @sqlcmd varchar(500)
SET @id = 1
WHILE @id <=@maxid
BEGIN
          SELECT @sqlcmd = sql_cmd FROM #db_users WHERE id = @id
          INSERT INTO #alldb_users EXEC (@sqlcmd)
          SET @id = @id + 1
END

DELETE FROM #alldb_users WHERE sid is null
DELETE FROM #alldb_users WHERE sid <> @login_sid

IF NOT EXISTS (SELECT * FROM #alldb_users )
BEGIN
          PRINT '--Login ['+@login_name+'] doesn''t have access to any database'
END

DECLARE @name sysname, @dbname sysname, @schema sysname, @dbuser_cmd varchar(8000)
DECLARE dbuser_cursor CURSOR FAST_FORWARD FOR
SELECT dbname, name, default_schema_name
FROM #alldb_users

OPEN dbuser_cursor
FETCH NEXT FROM dbuser_cursor INTO @dbname, @name, @schema
WHILE @@FETCH_STATUS = 0
BEGIN

IF @schema IS NOT NULL
BEGIN
SELECT @dbuser_cmd = 'USE ['+dbname+']
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = '''+name+''')
BEGIN
          CREATE USER ['+@name+'] FOR LOGIN ['+@login_name+']'+isnull(' WITH DEFAULT_SCHEMA=['+default_schema_name+']','')+'
END
GO
' FROM #alldb_users WHERE name = @name and dbname = @dbname
END
ELSE
BEGIN
SELECT @dbuser_cmd = 'USE ['+dbname+']
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = '''+name+''')
BEGIN
          CREATE USER ['+@name+'] FOR LOGIN ['+@login_name+']
END
GO
' FROM #alldb_users WHERE name = @name and dbname = @dbname
END

          print @dbuser_cmd
    FETCH NEXT FROM dbuser_cursor INTO @dbname, @name, @schema
    END

CLOSE dbuser_cursor
DEALLOCATE dbuser_cursor

----------------------------------------------
--Create DB Role Temp table
----------------------------------------------

IF OBJECT_ID('tempdb..#dbrole') is not null
BEGIN
          DROP TABLE #dbrole
END

create table #dbrole (id int identity(1,1), dbname varchar(100), dbrole varchar (100), dbrole_member varchar(100), sid varbinary(85),default_schema_name varchar(100), login_name varchar(100), db_principal_id int)
DECLARE @dbrole_sqlcmd varchar(max)
DECLARE dbrole_cursor CURSOR FAST_FORWARD FOR
SELECT
'SELECT '''+dbname+''', c.name, b.name, b.sid, b.default_schema_name, d.name, b.principal_id as login_name
from ['+dbname+'].sys.database_role_members a
inner join ['+dbname+'].sys.database_principals b on a.member_principal_id = b.principal_id
inner join ['+dbname+'].sys.database_principals c on a.role_principal_id = c.principal_id
left join sys.server_principals d on b.sid = d.sid
where d.name= '''+@login_name+''''
from #alldb_users

OPEN dbrole_cursor
FETCH NEXT FROM dbrole_cursor INTO @dbrole_sqlcmd
WHILE @@FETCH_STATUS = 0
BEGIN
    INSERT INTO #dbrole (dbname, dbrole, dbrole_member, sid, default_schema_name, login_name, db_principal_id) exec(@dbrole_sqlcmd)
    FETCH NEXT FROM dbrole_cursor INTO @dbrole_sqlcmd
    END

CLOSE dbrole_cursor
DEALLOCATE dbrole_cursor

DELETE FROM #dbrole WHERE sid <> @login_sid

IF EXISTS (SELECT * FROM #dbrole where dbrole = 'db_owner')
BEGIN
          PRINT '----------------------------------------------'
          PRINT'--Login is db_owner of below databases'
          PRINT'----------------------------------------------'
END

DECLARE @dbname_dbowner varchar(100), @dbrole_member varchar(100)
DECLARE dbowner_cursor CURSOR FAST_FORWARD FOR
SELECT dbname, dbrole_member from #dbrole where dbrole = 'db_owner'

OPEN dbowner_cursor
FETCH NEXT FROM dbowner_cursor INTO @dbname_dbowner, @dbrole_member
WHILE @@FETCH_STATUS = 0
BEGIN
          PRINT 'USE ['+@dbname_dbowner+']
EXEC sp_addrolemember ''db_owner'','''+@dbrole_member +'''
GO'
    FETCH NEXT FROM dbowner_cursor INTO @dbname_dbowner, @dbrole_member
    END

CLOSE dbowner_cursor
DEALLOCATE dbowner_cursor

--------------------------------------------------------------------------------------------------------
--Find out what database the login has permission to access (avoid restricted and single user database)
--------------------------------------------------------------------------------------------------------

DELETE FROM #srvrole where MemberName <> @login_name

IF OBJECT_ID('tempdb..#alldb_users_access') IS NOT NULL
BEGIN
          DROP TABLE #alldb_users_access
END

SELECT a.*, collation_name INTO #alldb_users_access FROM #alldb_users a inner join sys.databases b ON a.dbname = b.name
WHERE user_access = 0
OR
(user_access = 2 and exists (SELECT * FROM #srvrole WHERE ServerRole in ('dbcreator','sysadmin')))
OR
(user_access = 2 and a.dbname in (SELECT dbname FROM #dbrole WHERE dbrole = 'db_owner' AND login_name = @login_name))

--------------------------------------------------------------------------------------------------------
--Remove database that login doesn't have permission to connect
--------------------------------------------------------------------------------------------------------

IF OBJECT_ID('tempdb..#dbconnect') is not null
BEGIN
          DROP TABLE #dbconnect
END

CREATE TABLE #dbconnect ( dbname varchar(100), connect_status bit)

DECLARE @dbconnect_sqlcmd varchar(1000)
SET @dbconnect_sqlcmd  = ''
DECLARE dbbconnect_cursor CURSOR FAST_FORWARD FOR
SELECT 'select distinct '''+dbname+''', 1 from ['+dbname+'].sys.database_permissions a
inner join ['+dbname+'].sys.database_principals b on a.grantee_principal_id = b.principal_id
inner join ['+dbname+'].sys.server_principals c on b.sid = c.sid
where c.name = '''+@login_name+''''
from #alldb_users_access

OPEN dbbconnect_cursor
FETCH NEXT FROM dbbconnect_cursor INTO @dbconnect_sqlcmd
WHILE @@FETCH_STATUS = 0
BEGIN
          INSERT INTO #dbconnect exec( @dbconnect_sqlcmd)
    FETCH NEXT FROM dbbconnect_cursor INTO @dbconnect_sqlcmd
    END

CLOSE dbbconnect_cursor
DEALLOCATE dbbconnect_cursor

insert into #dbconnect
select a.dbname, 0 from #alldb_users_access a left join #dbconnect b on a.dbname = b.dbname
where b.dbname is null

---------------------------------------------------------------------------------------
--Grant all securable class to login
----------------------------------------------------------------------------------------
PRINT ''
PRINT '----------------------------------------------'
PRINT '--Grant all securable class to login '
PRINT '----------------------------------------------'

IF OBJECT_ID('tempdb..#securable_class') is not null
BEGIN
          DROP TABLE #securable_class
END

IF OBJECT_ID('tempdb..#dblevel') is not null
BEGIN
          DROP TABLE #dblevel
END
create table #dblevel (login_name varchar(256), dbname sysname, dbuser_name varchar(100), class_desc varchar(100), permission_name varchar(100),state_desc varchar(100))

DECLARE @dblevel_sqlcmd varchar(1000)
DECLARE dblevel_cursor CURSOR FAST_FORWARD FOR
SELECT 'select '''+@login_name+''' as login_name, '''+dbname+''' as dbname, b.name as dbuser_name, a.class_desc, a.permission_name, state_desc from ['+dbname+'].sys.database_permissions a inner join ['+dbname+'].sys.database_principals b
on a.grantee_principal_id = b.principal_id
where b.name in (''public'','''+name+''') and class_desc = ''DATABASE'''
FROM #alldb_users_access
union
SELECT 'select '''+@login_name+''' as login_name, ''master'' as dbname, b.name as dbuser_name, a.class_desc, a.permission_name, state_desc from sys.server_permissions a inner join sys.server_principals b
on a.grantee_principal_id = b.principal_id
where b.name = '''+@login_name+''''
UNION
SELECT 'select '''+@login_name+''' as login_name, ''master'' as dbname, b.name as dbuser_name, a.class_desc, a.permission_name, state_desc from sys.server_permissions a inner join sys.server_principals b
on a.grantee_principal_id = b.principal_id and class_desc = ''SERVER''
where b.name = ''public'''

OPEN dblevel_cursor
FETCH NEXT FROM dblevel_cursor INTO @dblevel_sqlcmd
WHILE @@FETCH_STATUS = 0
BEGIN
    INSERT INTO #dblevel (login_name, dbname, dbuser_name, class_desc, permission_name, state_desc) EXEC (@dblevel_sqlcmd)
    FETCH NEXT FROM dblevel_cursor INTO @dblevel_sqlcmd
    END

CLOSE dblevel_cursor
DEALLOCATE dblevel_cursor

SET NOCOUNT ON

DELETE FROM #dblevel WHERE permission_name IN ('SELECT','INSERT','UPDATE','DELETE','REFERENCES')
DELETE FROM #dblevel WHERE dbname IN (SELECT dbname FROM #dbrole WHERE sid = @login_sid AND dbrole = 'db_owner')

DECLARE @securable_sqlcmd varchar(150)
DECLARE securable_cursor CURSOR FAST_FORWARD FOR
SELECT distinct 'USE ['+dbname+']
GRANT '+permission_name+' TO ['+@login_name+']
GO
' FROM #dblevel

OPEN securable_cursor
FETCH NEXT FROM securable_cursor INTO @securable_sqlcmd
WHILE @@FETCH_STATUS = 0
BEGIN
          PRINT @securable_sqlcmd
          FETCH NEXT FROM securable_cursor INTO @securable_sqlcmd
END
CLOSE securable_cursor
DEALLOCATE securable_cursor

 

If you input a bad login name, the script will warn you; the scenarios are described below.

Scenario 1: If you enter either 'sa' or 'public', then you will see results similar to the below snapshot in the SSMS result pane (Fig. 1.1).

Fig 1.1

Scenario 2: If the login you provide doesn't exist on the SQL instance, then you will see results similar to the below snapshot in the SSMS result pane (Fig 1.2)

Fig 1.2

Scenario 3: Now we can look into a real scenario, a valid login with permissions on server and database-level securables. Replacing 'YourLoginName' with a valid login, below is an example of how the outcome of the script will look. It contains the creation of the database users for the login and the permission settings for the server and database securables. If a login is db_owner of a database or a member of the sysadmin server role, the resulting script will only contain calls to the system stored procedures sp_addrolemember and sp_addsrvrolemember instead of printing out the securable grants one by one.

Fig 1.3

Scenario 4: The input login is a member of db_owner on AdventureWorksDW, but not on the rest of the databases (Fig 1.4).

----------------------------------------------
--Create database user for login
----------------------------------------------
USE [master]
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'chsu')
BEGIN
          CREATE USER [chsu] FOR LOGIN [chsu] WITH DEFAULT_SCHEMA=[dbo]
END
GO
USE [AdventureWorks]
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'chsu')
BEGIN
          CREATE USER [chsu] FOR LOGIN [chsu] WITH DEFAULT_SCHEMA=[dbo]
END
GO
USE [Northwind]
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'chsu')
BEGIN
          CREATE USER [chsu] FOR LOGIN [chsu] WITH DEFAULT_SCHEMA=[dbo]
END
GO
USE [AdventureWorksDW]
IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'chsu')
BEGIN
          CREATE USER [chsu] FOR LOGIN [chsu] WITH DEFAULT_SCHEMA=[dbo]
END
GO
----------------------------------------------
--Login is db_owner of below databases
----------------------------------------------
USE [AdventureWorksDW]
EXEC sp_addrolemember 'db_owner','user_dbo_bond_bg'
GO
----------------------------------------------
--Grant all securable class to login
----------------------------------------------
USE [Northwind]
GRANT ALTER ANY APPLICATION ROLE TO [chsu]
GO
USE [Northwind]
GRANT ALTER ANY ROUTE TO [chsu]
GO
USE [Northwind]
GRANT CONNECT TO [chsu]
GO
USE [Northwind]
GRANT CREATE ROUTE TO [chsu]
GO
USE [Northwind]
GRANT SHOWPLAN TO [chsu]
GO
USE [master]
GRANT ALTER ANY LINKED SERVER TO [chsu]
GO
USE [master]
GRANT ALTER SETTINGS TO [chsu]
GO
USE [master]
GRANT CONNECT SQL TO [chsu]
GO
USE [master]
GRANT CONNECT TO [chsu]
GO
USE [master]
GRANT EXTERNAL ACCESS ASSEMBLY TO [chsu]
GO
USE [master]
GRANT UNSAFE ASSEMBLY TO [chsu]
GO
USE [master]
GRANT VIEW ANY DATABASE TO [chsu]
GO
USE [AdventureWorks]
GRANT CONNECT TO [chsu]
GO
USE [AdventureWorks]
GRANT SHOWPLAN TO [chsu]
GO

Fig 1.4

Conclusion

We covered login regeneration in the first article, including login creation and server-level and database-level role assignment. In this article, we have summarized how to produce a script for a login's server-level and database-level securables.

In the final article of the series, I'll tackle login generation for the finest-scope securable: object-level permissions for the login.

posted by LifeisSimple
2011. 2. 25. 12:26 Brain Trainning/NoSQL

import java.sql.*;

import org.junit.Test;

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.Mongo;

public class MongoDB {

       @Test
       // Copy rows from MSSQL into MongoDB
       public void testMSSQLConnection() throws Exception {
             Connection conSQL = null;
             Statement stmt = null;
             ResultSet rs = null;

             // MSSQL
             String strJDBCDrv = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
             String strJDBCUrl = "jdbc:sqlserver://localhost:1433;DatabaseName=SQLDB";
             String strQuery = "select testID, testName from T_Test order by testID desc";

             // MongoDB
             Mongo connection = new Mongo("MongoDBIP", 27017);
             DB mongoJUnit = connection.getDB("testDB");
             DBCollection dept = mongoJUnit.getCollection("T_Test");

             /*
             try {
                    Class.forName(strJDBCDrv);
                    System.err.println("Driver lookup succeeded");
             }
             catch (ClassNotFoundException ee) {
                    ee.printStackTrace();
                    System.err.println("Driver lookup failed");
             }
             */

             try {
                    Class.forName(strJDBCDrv).newInstance();
                    conSQL = DriverManager.getConnection(strJDBCUrl, "sa", "1231231");

                    stmt = conSQL.createStatement();
                    rs = stmt.executeQuery(strQuery);

                    BasicDBObject putDB = new BasicDBObject();

                    // Insert each SQL Server row into MongoDB as a document
                    while (rs.next()) {
                           putDB.put("testID", rs.getInt(1));
                           putDB.put("testName", rs.getString(2));
                           dept.insert(putDB);
                           putDB.clear();   // reuse the object for the next row
                    }
             }
             catch (Exception e) {
                    e.printStackTrace();
             }
             finally {
                    // Close in reverse order of acquisition, guarding against nulls
                    if (rs != null) rs.close();
                    if (stmt != null) stmt.close();
                    if (conSQL != null) conSQL.close();
             }
       }
}

posted by LifeisSimple