[{"data":1,"prerenderedAt":106},["ShallowReactive",2],{"glossary-page-\u002Fglossary\u002Ftest-coverage":3},{"id":4,"title":5,"body":6,"description":93,"extension":94,"meta":95,"navigation":101,"path":102,"seo":103,"stem":104,"__hash__":105},"docs\u002Fglossary\u002Ftest-coverage.md","Test Coverage",{"type":7,"value":8,"toc":85},"minimark",[9,15,20,27,31,34,50,53,57,60,64],[10,11,12],"glossary-title",{},[13,14,5],"p",{},[16,17,19],"h2",{"id":18},"what-is-test-coverage","What Is Test Coverage?",[13,21,22,26],{},[23,24,25],"strong",{},"Test coverage"," is a measure of how much of a codebase is exercised by automated tests.\nIt is often expressed as a percentage of lines, branches, or functions covered during test execution.",[16,28,30],{"id":29},"why-test-coverage-matters","Why Test Coverage Matters",[13,32,33],{},"Test coverage matters because it can help teams:",[35,36,37,41,44,47],"ul",{},[38,39,40],"li",{},"understand where testing is thin,",[38,42,43],{},"identify unprotected code areas,",[38,45,46],{},"support safer refactoring,",[38,48,49],{},"improve quality conversations.",[13,51,52],{},"Coverage should still be interpreted carefully. High coverage does not guarantee good tests.",[16,54,56],{"id":55},"how-oobeya-uses-test-coverage-context","How Oobeya Uses Test Coverage Context",[13,58,59],{},"Oobeya treats test coverage as supporting context rather than a standalone success metric.\nIt becomes more useful when combined with delivery, failure, and code quality signals.",[16,61,63],{"id":62},"related-terms","Related Terms",[35,65,66,73,79],{},[38,67,68],{},[69,70,72],"a",{"href":71},"\u002Fglossary\u002Fflaky-test","Flaky Test",[38,74,75],{},[69,76,78],{"href":77},"\u002Fglossary\u002Fsonarqube","SonarQube",[38,80,81],{},[69,82,84],{"href":83},"\u002Fglossary\u002Fdefect-escape-rate","Defect Escape Rate",{"title":86,"searchDepth":87,"depth":87,"links":88},"",2,[89,90,91,92],{"id":18,"depth":87,"text":19},{"id":29,"depth":87,"text":30},{"id":55,"depth":87,"text":56},{"id":62,"depth":87,"text":63},"Test coverage is a measure of how much of a codebase is exercised by automated tests.","md",{"category":96,"tags":97},"T",[98,99,100],"Testing","Code Quality","Engineering Metrics",true,"\u002Fglossary\u002Ftest-coverage",{"title":5,"description":93},"glossary\u002Ftest-coverage","T6f4j-XUdz0erT4PRBsfCR1gu2wv7GOs5y2IpnT2t7U",1775745637378]