Skip to content

Commit

Permalink
support/useful-queries (#1528)
Browse files Browse the repository at this point in the history
* diagnostic queries

* check summary query & notes on pin gen

* add offset support for bulk data creation
  • Loading branch information
GuyHarwood authored Feb 13, 2020
1 parent a9e8a3a commit 76da065
Show file tree
Hide file tree
Showing 13 changed files with 202 additions and 7 deletions.
38 changes: 38 additions & 0 deletions admin/services/check-start.service/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Check & Pin Generation

## Current journey

- pupilPinController.postGeneratePins
- get check window
- determine eligibility to generate pin
- find school
- generate school password if necessary (refactor already in progress)
- checkStartService.prepareCheck2
- get pupils eligible for pin generation
- validate that they are still eligible (no DB call)
- get all check forms (cached)
- get forms used by pupil
- checkStartService.initialisePupilCheck (call per pupil)
- allocate check form
- get pin expiry time
- create checks in batch
- find checks by pupil id
- create pupil check payloads
- prepare checks
- store check configs

## Proposals
- spCreateChecks returns check payload data, rather than just ids
- this eliminates 2 calls
- cache sas tokens for 1 hour (configurable)
- new service 'check allocation / preparation'
- smaller distinct services with single purpose
- more robust (include rollbacks)

## Concerns

- not easy to navigate / maintain
- not optimised
- naming issues
- data / object bloat
- should this be out of process? (in a function etc)
3 changes: 2 additions & 1 deletion deploy/sql/config.js
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ module.exports = {
PauseMultiplier: parseFloat(process.env.RETRY_PAUSE_MULTIPLIER) || 1.5
},
DummyData: {
SchoolCount: parseInt(process.env.DUMMY_SCHOOL_COUNT, 10) || 100
SchoolCount: parseInt(process.env.DUMMY_SCHOOL_COUNT, 10) || 100,
SchoolOffset: parseInt(process.env.DUMMY_SCHOOL_OFFSET, 10) || 10
}
}
4 changes: 3 additions & 1 deletion deploy/sql/dummy-data/school.js
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ const config = require('../config')
const { performance } = require('perf_hooks')

const schoolCount = config.DummyData.SchoolCount
const schoolOffset = config.DummyData.SchoolOffset
const schoolUpperLimit = schoolCount + schoolOffset

const table = new sql.Table('mtc_admin.school')
table.create = false
Expand All @@ -13,7 +15,7 @@ table.columns.add('estabCode', sql.NVarChar, { length: 'max' })
table.columns.add('name', sql.NVarChar, { length: 'max', nullable: false })
table.columns.add('urn', sql.Int, { nullable: false })
table.columns.add('dfeNumber', sql.Int, { nullable: false })
for (let idx = 0; idx < schoolCount; idx++) {
for (let idx = schoolOffset; idx < schoolUpperLimit; idx++) {
table.rows.add(777, 'estab', `bulk school ${idx + 1}`, idx + 1, idx + 1)
}

Expand Down
13 changes: 8 additions & 5 deletions deploy/sql/dummy-data/teacher.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,13 @@ const sql = require('mssql')
const config = require('../config')
const { performance } = require('perf_hooks')

const teacherCount = config.DummyData.SchoolCount
const password = '$2a$10$.WsawgZpWSAQVaa6Vz3P1.XO.1YntYJLd6Da5lrXCAkVxhhLpkOHK'
const teacherCount = config.DummyData.SchoolCount
const schoolOffset = config.DummyData.SchoolOffset
const teacherRoleId = 3
let schoolId = 7
let schoolId = schoolOffset
let teacherIndex = schoolOffset
const teacherUpperLimit = teacherCount + schoolOffset

const table = new sql.Table('mtc_admin.user')
table.create = false
Expand All @@ -16,15 +19,15 @@ table.columns.add('passwordHash', sql.NVarChar, { length: 'max' })
table.columns.add('school_id', sql.Int)
table.columns.add('role_id', sql.Int, { nullable: false })

for (let idx = 0; idx < teacherCount; idx++) {
table.rows.add(`bulk-teacher${idx + 1}`, password, schoolId++, teacherRoleId)
for (teacherIndex = 10001; teacherIndex < teacherUpperLimit; teacherIndex++) {
table.rows.add(`bulk-teacher${teacherIndex + 1}`, password, schoolId++, teacherRoleId)
}

const pool = new sql.ConnectionPool(config.Sql)
pool.connect()
.then(() => {
console.log('connected')
console.log(`inserting ${teacherCount} teachers...`)
console.log(`inserting range of teachers from ${schoolOffset} to ${teacherUpperLimit}...`)
const request = new sql.Request(pool)
const start = performance.now()
request.bulk(table, async (err, result) => {
Expand Down
3 changes: 3 additions & 0 deletions support/sql-azure/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# mssql-utils

Utility scripts for DevOps.
7 changes: 7 additions & 0 deletions support/sql-azure/app-check-summary.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
-- One-row summary of check processing state across every row in
-- mtc_admin.[check]: totals for received / completed / failed checks.
-- COUNT(CASE ... THEN 1 END) counts only rows matching the predicate
-- (the implicit ELSE NULL is not counted).
SELECT
    COUNT(*)                                         AS [TotalChecks],
    COUNT(CASE WHEN received = 0 THEN 1 END)         AS [NotReceived],
    COUNT(CASE WHEN received = 1 THEN 1 END)         AS [Received],
    COUNT(CASE WHEN complete = 0 THEN 1 END)         AS [Incomplete],
    COUNT(CASE WHEN complete = 1 THEN 1 END)         AS [Complete],
    COUNT(CASE WHEN processingFailed = 1 THEN 1 END) AS [Failures]
FROM mtc_admin.[check]
8 changes: 8 additions & 0 deletions support/sql-azure/azure-active-hot-queries.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
-- Top 10 CPU-consuming statements among requests executing RIGHT NOW,
-- aggregated by query_hash so repeated executions of the same statement
-- shape are grouped. Point-in-time snapshot: sys.dm_exec_requests only
-- reflects work in flight at the moment the query runs.
PRINT '-- top 10 Active CPU Consuming Queries (aggregated)--';
-- GETDATE() is emitted as 'runtime' so repeated captures can be told apart.
SELECT TOP 10 GETDATE() runtime, *
-- Outer derived table: per-hash totals; MIN(...) picks one sample statement
-- text, flattened to one line (LF/CR -> space) and truncated to 256 chars.
FROM(SELECT query_stats.query_hash, SUM(query_stats.cpu_time) 'Total_Request_Cpu_Time_Ms', SUM(logical_reads) 'Total_Request_Logical_Reads', MIN(start_time) 'Earliest_Request_start_Time', COUNT(*) 'Number_Of_Requests', SUBSTRING(REPLACE(REPLACE(MIN(query_stats.statement_text), CHAR(10), ' '), CHAR(13), ' '), 1, 256) AS "Statement_Text"
-- Inner derived table: extract the currently-active statement from the batch
-- text using the request's byte offsets (text is nvarchar, hence the /2;
-- an end offset of -1 means "to the end of the batch").
FROM(SELECT req.*, SUBSTRING(ST.text, (req.statement_start_offset / 2)+1, ((CASE statement_end_offset WHEN -1 THEN DATALENGTH(ST.text)ELSE req.statement_end_offset END-req.statement_start_offset)/ 2)+1) AS statement_text
FROM sys.dm_exec_requests AS req
CROSS APPLY sys.dm_exec_sql_text(req.sql_handle) AS ST ) AS query_stats
GROUP BY query_hash) AS t
ORDER BY Total_Request_Cpu_Time_Ms DESC;
16 changes: 16 additions & 0 deletions support/sql-azure/azure-past-hot-queries.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
-- Top 15 CPU-consuming queries (grouped by query_hash) from the Query Store,
-- covering the last 2 hours of recorded activity.
-- Note that one query_hash can map to many query ids when the statement is
-- not parameterized (or not parameterized properly); MIN(query_sql_text)
-- grabs one representative sample text per hash.
-- AggregatedCPU: per-hash CPU totals/averages (converted to ms), execution
-- counts split by outcome (Regular/Aborted/Exception) and plan/query-id
-- cardinality, joined across the Query Store catalog views.
WITH AggregatedCPU AS (SELECT q.query_hash, SUM(count_executions * avg_cpu_time / 1000.0) AS total_cpu_millisec, SUM(count_executions * avg_cpu_time / 1000.0)/ SUM(count_executions) AS avg_cpu_millisec, MAX(rs.max_cpu_time / 1000.00) AS max_cpu_millisec, MAX(max_logical_io_reads) max_logical_reads, COUNT(DISTINCT p.plan_id) AS number_of_distinct_plans, COUNT(DISTINCT p.query_id) AS number_of_distinct_query_ids, SUM(CASE WHEN rs.execution_type_desc='Aborted' THEN count_executions ELSE 0 END) AS Aborted_Execution_Count, SUM(CASE WHEN rs.execution_type_desc='Regular' THEN count_executions ELSE 0 END) AS Regular_Execution_Count, SUM(CASE WHEN rs.execution_type_desc='Exception' THEN count_executions ELSE 0 END) AS Exception_Execution_Count, SUM(count_executions) AS total_executions, MIN(qt.query_sql_text) AS sampled_query_text
FROM sys.query_store_query_text AS qt
JOIN sys.query_store_query AS q ON qt.query_text_id=q.query_text_id
JOIN sys.query_store_plan AS p ON q.query_id=p.query_id
JOIN sys.query_store_runtime_stats AS rs ON rs.plan_id=p.plan_id
JOIN sys.query_store_runtime_stats_interval AS rsi ON rsi.runtime_stats_interval_id=rs.runtime_stats_interval_id
-- Only runtime-stats intervals that started within the last 2 hours (UTC).
WHERE rs.execution_type_desc IN ('Regular', 'Aborted', 'Exception')AND rsi.start_time>=DATEADD(HOUR, -2, GETUTCDATE())
-- (GROUP BY closes AggregatedCPU.) OrderedCPU then ranks the hashes by
-- total CPU, with query_hash as a deterministic tie-breaker.
GROUP BY q.query_hash), OrderedCPU AS (SELECT query_hash, total_cpu_millisec, avg_cpu_millisec, max_cpu_millisec, max_logical_reads, number_of_distinct_plans, number_of_distinct_query_ids, total_executions, Aborted_Execution_Count, Regular_Execution_Count, Exception_Execution_Count, sampled_query_text, ROW_NUMBER() OVER (ORDER BY total_cpu_millisec DESC, query_hash ASC) AS RN
FROM AggregatedCPU)
SELECT OD.query_hash, OD.total_cpu_millisec, OD.avg_cpu_millisec, OD.max_cpu_millisec, OD.max_logical_reads, OD.number_of_distinct_plans, OD.number_of_distinct_query_ids, OD.total_executions, OD.Aborted_Execution_Count, OD.Regular_Execution_Count, OD.Exception_Execution_Count, OD.sampled_query_text, OD.RN
FROM OrderedCPU AS OD
WHERE OD.RN<=15
ORDER BY total_cpu_millisec DESC;
14 changes: 14 additions & 0 deletions support/sql-azure/deadlock-identify.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
-- Deadlock diagnostics (two independent queries).
-- NOTE(review): sys.event_log and fn_xe_telemetry_blob_target_read_file are
-- Azure SQL Database-specific; this script will not run on on-prem SQL Server.
-- Query 1: list deadlock entries recorded in the server-wide event log.
SELECT * FROM sys.event_log
WHERE event_type = 'deadlock';
-- Query 2: read the deadlock graphs captured by the built-in Extended Events
-- telemetry ('dl' blob target) and extract, per event, the timestamp, the
-- deadlock XML report, and the database name.
WITH CTE AS (
SELECT CAST(event_data AS XML) AS [target_data_XML]
FROM sys.fn_xe_telemetry_blob_target_read_file('dl',
null, null, null)
)
SELECT target_data_XML.value('(/event/@timestamp)[1]',
'DateTime2') AS Timestamp,
target_data_XML.query('/event/data[@name=''xml_report'']
/value/deadlock') AS deadlock_xml,
target_data_XML.query('/event/data[@name=''database_name'']
/value').value('(/value)[1]', 'nvarchar(100)') AS db_name
FROM CTE
23 changes: 23 additions & 0 deletions support/sql-azure/fk-unindexed.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
-- Find foreign keys whose referencing columns are not covered by any index.
-- https://www.sqlshack.com/index-foreign-key-columns-sql-server/
-- Unindexed FK columns force scans on the referencing table whenever the
-- referenced table's keys are deleted or updated.

-- Step 1: collect every user-defined foreign key.
-- sysname (nvarchar(128)) matches the catalog's identifier type, so long
-- table/constraint names cannot be truncated (the original varchar(100)
-- could truncate or fail on identifiers longer than 100 characters).
CREATE TABLE #TempForeignKeys (TableName sysname, ForeignKeyName sysname, ObjectID int)
INSERT INTO #TempForeignKeys
SELECT OBJ.NAME, ForKey.NAME, ForKey.[object_id]
FROM sys.foreign_keys ForKey
INNER JOIN sys.objects OBJ
ON OBJ.[object_id] = ForKey.[parent_object_id]
WHERE OBJ.is_ms_shipped = 0

-- Step 2: foreign keys that DO have an index on the referencing column
-- (FK column matched to an index column on the same table).
CREATE TABLE #TempIndexedFK (ObjectID int)
INSERT INTO #TempIndexedFK
SELECT ObjectID
FROM sys.foreign_key_columns ForKeyCol
JOIN sys.index_columns IDXCol
ON ForKeyCol.parent_object_id = IDXCol.[object_id]
JOIN #TempForeignKeys FK
ON ForKeyCol.constraint_object_id = FK.ObjectID
WHERE ForKeyCol.parent_column_id = IDXCol.column_id

-- Step 3: report the difference - foreign keys with no covering index.
-- (NOT IN is safe here: #TempIndexedFK.ObjectID is populated from catalog
-- object ids and contains no NULLs.)
SELECT * FROM #TempForeignKeys WHERE ObjectID NOT IN (SELECT ObjectID FROM #TempIndexedFK)

DROP TABLE #TempForeignKeys
DROP TABLE #TempIndexedFK
8 changes: 8 additions & 0 deletions support/sql-azure/index-count-per-table.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
-- Count of non-clustered indexes on every user table in the database,
-- listed by schema and table name.
-- https://blog.sqlauthority.com/2012/10/09/sql-server-identify-numbers-of-non-clustered-index-on-tables-for-entire-database/
SELECT
    COUNT(idx.TYPE) NoOfIndex,
    [schema_name] = sch.name,
    table_name    = tbl.name
FROM sys.indexes idx
INNER JOIN sys.objects tbl
    ON idx.[object_id] = tbl.[object_id]
INNER JOIN sys.schemas sch
    ON tbl.[schema_id] = sch.[schema_id]
WHERE tbl.TYPE IN ('U') -- user tables only
  AND idx.TYPE = 2      -- index type 2 = non-clustered
GROUP BY sch.name, tbl.name
ORDER BY schema_name, table_name
33 changes: 33 additions & 0 deletions support/sql-azure/index-missing.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
-- Missing Index Script
-- Original Author: Pinal Dave
-- https://blog.sqlauthority.com/2011/01/03/sql-server-2008-missing-index-script-download/
-- Lists the top 25 indexes the optimizer has recorded as "missing" for the
-- current database, ranked by estimated impact
-- (avg_user_impact * (user_seeks + user_scans)), and builds a ready-to-review
-- CREATE INDEX statement for each: name derived from table + key columns,
-- equality columns first, then inequality columns, with INCLUDE columns
-- appended when present.
-- NOTE(review): these DMV suggestions are hints, not commands - review before
-- creating anything; the stats reset when the instance restarts.
SELECT TOP 25
dm_mid.database_id AS DatabaseID,
dm_migs.avg_user_impact*(dm_migs.user_seeks+dm_migs.user_scans) Avg_Estimated_Impact,
dm_migs.last_user_seek AS Last_User_Seek,
OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) AS [TableName],
'CREATE INDEX [IX_' + OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) + '_'
+ REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.equality_columns,''),', ','_'),'[',''),']','')
+ CASE
WHEN dm_mid.equality_columns IS NOT NULL
AND dm_mid.inequality_columns IS NOT NULL THEN '_'
ELSE ''
END
+ REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.inequality_columns,''),', ','_'),'[',''),']','')
+ ']'
+ ' ON ' + dm_mid.statement
+ ' (' + ISNULL (dm_mid.equality_columns,'')
+ CASE WHEN dm_mid.equality_columns IS NOT NULL AND dm_mid.inequality_columns
IS NOT NULL THEN ',' ELSE
'' END
+ ISNULL (dm_mid.inequality_columns, '')
+ ')'
+ ISNULL (' INCLUDE (' + dm_mid.included_columns + ')', '') AS Create_Statement
-- Join each missing-index group to its usage statistics and column details.
FROM sys.dm_db_missing_index_groups dm_mig
INNER JOIN sys.dm_db_missing_index_group_stats dm_migs
ON dm_migs.group_handle = dm_mig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details dm_mid
ON dm_mig.index_handle = dm_mid.index_handle
-- Restrict to suggestions for the current database.
WHERE dm_mid.database_ID = DB_ID()
ORDER BY Avg_Estimated_Impact DESC
GO
39 changes: 39 additions & 0 deletions support/sql-azure/index-unused.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
-- Unused Index Script
-- Original Author: Pinal Dave
-- https://blog.sqlauthority.com/2011/01/04/sql-server-2008-unused-index-script-download/
/*
Please note: you should NOT drop every index this script suggests.
This is guidance only; as a rule of thumb avoid more than 5-10 indexes per table.
Additionally, this script sometimes does not give accurate information, so use your common sense.
---------------------------------------------------------------------------------------------------------
Pay attention to User Scans, User Lookups and User Updates before dropping an index.
The generic understanding is: if those values are all high while User Seeks is low,
the index needs tuning. A ready-made DROP INDEX statement is provided in the last column.
*/
-- Lists the 25 least-sought non-clustered, non-key indexes on user tables in
-- the current database (ranked by seeks + scans + lookups ascending), with
-- row counts and a generated DROP statement.
-- NOTE(review): dm_db_index_usage_stats only has rows for indexes used since
-- the last restart - indexes with NO usage rows at all will not appear here.
SELECT TOP 25
o.name AS ObjectName
, i.name AS IndexName
, i.index_id AS IndexID
, dm_ius.user_seeks AS UserSeek
, dm_ius.user_scans AS UserScans
, dm_ius.user_lookups AS UserLookups
, dm_ius.user_updates AS UserUpdates
, p.TableRows
, 'DROP INDEX ' + QUOTENAME(i.name)
+ ' ON ' + QUOTENAME(s.name) + '.'
+ QUOTENAME(OBJECT_NAME(dm_ius.OBJECT_ID)) AS 'drop statement'
FROM sys.dm_db_index_usage_stats dm_ius
INNER JOIN sys.indexes i ON i.index_id = dm_ius.index_id
AND dm_ius.OBJECT_ID = i.OBJECT_ID
INNER JOIN sys.objects o ON dm_ius.OBJECT_ID = o.OBJECT_ID
INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
-- Row count per index, summed across partitions.
INNER JOIN (SELECT SUM(p.rows) TableRows, p.index_id, p.OBJECT_ID
FROM sys.partitions p GROUP BY p.index_id, p.OBJECT_ID) p
ON p.index_id = dm_ius.index_id AND dm_ius.OBJECT_ID = p.OBJECT_ID
-- User tables in this database only; exclude clustered indexes, primary keys
-- and unique constraints (dropping those would change semantics, not just perf).
WHERE OBJECTPROPERTY(dm_ius.OBJECT_ID,'IsUserTable') = 1
AND dm_ius.database_id = DB_ID()
AND i.type_desc = 'nonclustered'
AND i.is_primary_key = 0
AND i.is_unique_constraint = 0
ORDER BY (dm_ius.user_seeks + dm_ius.user_scans + dm_ius.user_lookups) ASC
GO

0 comments on commit 76da065

Please sign in to comment.