
Postman automation test framework

How do I build a framework with the Postman tool?

  • We need to build a framework in Postman; it should connect to some external data file (Excel or a database) to fetch test data

  • There should also be a reporting component

Please help me with this. Is there any tool we can integrate with the Postman tool to run it?

Thanks in advance!! Asif

2 Answers

  • 4

    Newman is Postman's companion tool for running Postman tests from the command line. It also supports using newman as a library. We use it for continuous testing against our production servers.
    You can develop whatever framework you want, using Postman as one part of that framework.

    Here is a test I use to check 519 known-good values returned by our server. The data is populated if you provide a data file in the Postman collection runner, or if you supply one as a command-line option to newman.

    tests["Status code " + responseCode.code] = responseCode.code === 200;
    var response = JSON.parse(responseBody);
    tests[ 'response.errorsCount == ' + response.errorsCount ] = response.errorsCount === 0 ;
    
    var outputs = response.outputs ;
    var outputsLength = outputs.length ;
    
    if( data.hasOwnProperty("outputs") ) {
    
        tests[ 'data.outputs.length == response.outputs.length ' ] = data.outputs.length == response.outputs.length ;
    
        var dataOutputsLength = data.outputs.length ;
         for ( index = 0; index < outputsLength ; index++){
            var output = outputs[index] ;
            var expectedOutput = data.outputs[index] ;
            tests[expectedOutput.cell + ': ' + expectedOutput.value + ' == ' + output.cell + ': ' + output.value ] = expectedOutput.cell == output.cell && expectedOutput.value == output.value ;
         }
    
    }
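
    For illustration only, the kind of data file that drives the cell checks above might look like the following. The cell names and values here are made up; the only property the test script reads is the "outputs" array of cell/value objects, and each object in the top-level array is one iteration (exposed to the script as "data"):

    [
        {
            "outputs": [
                { "cell": "A1", "value": 42 },
                { "cell": "B7", "value": "total" }
            ]
        }
    ]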
    

    Here is an example of running the same tests with newman.

    newman --collection score-card-export-all-respondents.postman_collection --environment melbourne.postman_environment --data score-card-export-all-respondents-data.json --requestTimeout 60000
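
    The flag style shown above matches the older newman releases this answer was written against; current newman versions use a "newman run <collection>" form instead. If newman is not installed yet, it is published on npm, and a global install provides the command-line tool (assuming Node.js and npm are available):

    npm install -g newman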
    

    Here is the script we use to test our servers. It is invoked as ./postman-runner.js --tests staging.json; a sketch of the kind of file --tests expects follows the script below. The repository URL is postman-runner

    #!/usr/bin/env node
    
    var Newman = require('newman') ;
    var ResponseExporter = require('newman/src/utilities/ResponseExporter') ;
    
    var SparkPost = require('sparkpost');
    var fs = require('fs') ;
    var path = require('path') ;        
    var argv = require('minimist')(process.argv.slice(2)) ;
    var errors = [] ;
    var testResultsTests = [] ;
    
    function EvaluateNewmanTestResults(options) {
    
        // Optional options object: { name, failed, resultsPath }
        var opts = options || {} ;
        this.created = new Date() ;

        if (opts.name) {
            this.name = opts.name ;
        } else {
            this.name = this.created.toUTCString() ;
        }
        if (opts.failed){
            this.failed = opts.failed ;
        } else {
            this.failed = [] ;
        }
        if( opts.resultsPath){
            this.resultsPath = opts.resultsPath ;
        } else {
            this.resultsPath = null ;
        }
    
        this.tests = [] ;
        this.urls = [] ;
        this.runCount = 0 ;
        this.failedCount = 0 ;
    
        this.summary = function(){
            return {name: this.name, urls: this.urls, runCount: this.runCount, failedCount: this.failedCount, failed: this.failed} ;
        }
    
        this.JSON = function(){
            return JSON.stringify( this.summary() ) ;
        }
    
        this.evaluateResults = function(results_){
            this.tests = [] ;
            this.testResults = results_ ;
            this.overallResults = JSON.parse(fs.readFileSync(this.resultsPath, 'utf8')) ;
            if ( this.overallResults.collection.name) {
                this.name = this.overallResults.collection.name ;
            }
            for( testResultsIndex=0; testResultsIndex<this.testResults.length;testResultsIndex++){
                testResult = this.testResults[testResultsIndex] ;
                url = testResult.url ;
                this.urls.push(url) ;
                tests = testResult.tests ;
                for (var key in tests){
                    value = tests[key] ;
                    this.runCount++ ;
                    if ( value ) {
                        /* passed */
                    } else {
                        /* failed */
                        this.failed.push(key) ;
                        this.failedCount++ ;
                    }
                }
            }
        }
    
    }
    
    function notifyViaSparkPost( key, arguments, from, subject, html, notify, results, summary ){
    
        holdArguments = arguments ;
        arguments = JSON.stringify(arguments) ;
    
        /* failed tests is swapped into the html*/
        failedTests = JSON.stringify(summary) ;
        /* swap out {} to [] to avoid errors when populating html */ 
        failedTests = failedTests.replace(/{{/g, '[[') ;
        failedTests = failedTests.replace(/}}/g, ']]') ;
    
        /* collection name is swapped into the html*/
        collectionName = summary.name ;
    
        var regularExpression = /(?:{)(.*?)(?:})/g
    
        match = regularExpression.exec(subject);
        while (match != null) {
            subject = subject.replace( match[0], eval(match[1])) ;
            match = regularExpression.exec(subject) ;       
        }
    
        match = regularExpression.exec(html);
        while (match != null) {
            console.log(match[0]) ;
            console.log(match[1]) ;
            html = html.replace( match[0], eval(match[1])) ;
            match = regularExpression.exec(html) ;      
        }
    
        html = html.replace(/\[\[/g,'{{') ;
        html = html.replace(/\]\]/g,'}}') ;
    
        var sparkPost = new SparkPost(key);
    
        sparkPost.transmissions.send({
          transmissionBody: {
            content: {
              from: from,
              subject: subject,
              html: html
            },
            recipients: notify
          }
        }, function(err, res) {
          if (err) {
            console.log('Unexpected error sending email notification');
            console.log(err);
          } else {
            console.dir({sent:'email', 'with': holdArguments, 'and': failedTests });
          }
        });
    
    }
    
    function nextTest (arguments,sparkpostApiKey,tests,failed,callback) {
    
        var test = tests.shift() ;
        if (!test){
            callback(failed,arguments) ;
        } else {
            handleTest(arguments,sparkpostApiKey,test,tests,failed,callback) ;
        }
    
    }
    
    function handleTest(arguments,sparkpostApiKey,test,tests,failed,callback){
    
        var description = test.description ;
        var resultsJson = null ;
        var ran = {} ;
        var newmanOptions = {} ;
        var holdArguments = arguments ;
    
        if ( description ) {
            console.log('') ;
            console.log( 'Running ' + description ) ;
        }
        var collection = test.collection.join(path.sep) ;
        var environment = test.environment.join(path.sep) ;
    
        dataFile = test.data ;
        if ( dataFile ) {
            dataFile = dataFile.join(path.sep) ;
        }
    
        results = test.results.join(path.sep) ;
        requestTimeout = test.requestTimeout ;
    
        collectionJson = JSON.parse(fs.readFileSync(collection, 'utf8')) ;
        environmentJson = JSON.parse(fs.readFileSync(environment, 'utf8')) ;
    
        newmanOptions = {
            envJson: environmentJson ,
            iterationCount: 1,                      // define the number of times the runner should run 
            outputFile: results,                    // the file to export to 
            responseHandler: "TestResponseHandler", // the response handler to use 
            asLibrary: true,                        // this makes sure the exit code is returned as an argument to the callback function 
            stopOnError: false,
            dataFile: dataFile,
            requestTimeout: requestTimeout
        }
    
        arguments = {collection:collection, environment:environment, dataFile:dataFile, results:results} ;
        notificationArguments = {collection:collection, environment:environment, dataFile:dataFile, results:results} ; 
    
        ouch = new EvaluateNewmanTestResults() ;
        ouch.resultsPath = results ;
        ouch.runCount = 0 ;
    
        if ( argv.simulateTestFailure ) {
    
            resultsJson = JSON.parse(fs.readFileSync(results, 'utf8')) ;
            ran = { name: collectionJson.name, runCount: 0, failedCount: 0, exitCode: 1, failed: ["Some example failed tests","and another"] } ;
    
            notifyViaSparkPost( 
                sparkpostApiKey, 
                arguments,
                test.notify.sparkpost.from, 
                test.notify.sparkpost.subject,
                test.notify.sparkpost.html, 
                test.notify.sparkpost.recipients, 
                resultsJson,
                ran 
            ) ;     
    
            nextTest(holdArguments, sparkpostApiKey, tests, failed, callback) ;
    
        } else {
    
            /* clear the results from any previous run */
            ResponseExporter._results = [] ;        
    
            Newman.execute(collectionJson, newmanOptions, function(exitCode){
    
                ouch.evaluateResults(ResponseExporter._results) ;
                console.dir(ouch.summary()) ;
    
                if (!holdArguments.totalTestsFailed) {
                    holdArguments.totalTestsFailed = 0 ;
                }
                if (!holdArguments.totalTestsRun) {
                    holdArguments.totalTestsRun = 0 ;
                }
                holdArguments.totalTestsFailed = holdArguments.totalTestsFailed + ouch.failedCount ;
                holdArguments.totalTestsRun = holdArguments.totalTestsRun + ouch.runCount ;
    
                if (ouch.failedCount>0){
                    notifyViaSparkPost( 
                        sparkpostApiKey, 
                        notificationArguments,
                        test.notify.sparkpost.from, 
                        test.notify.sparkpost.subject,
                        test.notify.sparkpost.html, 
                        test.notify.sparkpost.recipients, 
                        resultsJson,
                        ouch.summary() ) ;
                }
    
                nextTest(holdArguments, sparkpostApiKey, tests, failed, callback) ;
    
            }) ;
        }
    
    }
    
    if ( !argv.tests ) {
        errors.push('--tests parameter is missing') ;
    } else {
        if( !fs.existsSync(argv.tests)){
            errors.push( argv.tests + ' is an invalid path') ;      
        }
    }
    
    if ( errors.length > 0 ) {
    
        console.dir({ errors: errors }) ;   
    
    } else {
    
        fs.readFile( argv.tests, 'utf8', function( error, data) {
    
                var tests = JSON.parse(data) ;
                var sparkpostApiKey = tests.sparkpostApiKey ;
                var run = tests.run ; // array of tests to run
                var failed = [] ;
                argv.totalTestsRun = 0 ;
                argv.totalTestsFailed = 0 ;
    
                nextTest(argv, sparkpostApiKey, run, failed, function(failed,arguments){
                    console.log('finished test runs') ; 
                    if ( failed.length > 0){
                        console.dir(failed) ;                   
                    }
                    console.dir({ totalTestsRun: arguments.totalTestsRun, totalTestsFailed: arguments.totalTestsFailed } ) ;
                }) ;
    
        }) ;
    
    }
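
    Based on what the script above reads, the file passed via --tests (staging.json in the example) might be shaped roughly like this. Every name, path, address, and key below is a placeholder rather than a value from the real repository; collection, environment, data, and results are arrays of path segments because the script joins them with path.sep, data is optional, and the notify.sparkpost block is only used when a run reports failures or when --simulateTestFailure is passed:

    {
        "sparkpostApiKey": "YOUR_SPARKPOST_API_KEY",
        "run": [
            {
                "description": "Score card export - all respondents",
                "collection": ["collections", "score-card-export-all-respondents.postman_collection"],
                "environment": ["environments", "melbourne.postman_environment"],
                "data": ["data", "score-card-export-all-respondents-data.json"],
                "results": ["results", "score-card-export-results.json"],
                "requestTimeout": 60000,
                "notify": {
                    "sparkpost": {
                        "from": "tests@example.com",
                        "subject": "Postman run failures in {collectionName}",
                        "html": "<p>Failed tests: {failedTests}</p>",
                        "recipients": [ { "address": { "email": "team@example.com" } } ]
                    }
                }
            }
        ]
    }

    The {collectionName} and {failedTests} placeholders in subject and html are the ones the notifyViaSparkPost function resolves with its regular-expression/eval loop before sending the email.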
    
  • 5

    I would not rely on Postman as the basis for a test framework.

    Postman is great for debugging, but I doubt it is the best tool to plug into "continuous build integration" and provide comprehensive reports.

    I would consider looking into Apache jMeter.

    It is a bit more complex, but it has a lot of very cool features, plugins, and so on, and it can easily be integrated with whatever you want. It does everything Postman does.

    On my project, we use Postman for quick checks as coding is completed, and jMeter for availability and load testing.
