/6.0.3/testrunner/pytests/
nonroottests.py
    39  command = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
    42  command = "rm -rf etc/ opt/ usr/ {0}.*".format(self.build[:-4])
    55  command = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
    58  command = "rm -rf etc/ opt/ usr/ {0}.*".format(self.build[:-4])
    76  command0 = "rm -rf opt/ etc/ && rm -rf {0}".format(self.build)
    77  command1 = "wget http://builds.hq.northscale.net/latestbuilds/{0}".format(self.build)
    78  command2 = "rpm2cpio {0} | cpio --extract --make-directories --no-absolute-filenames".format(self.build)
    79  command3 = "cd /home/{0}/opt/couchbase && ./bin/install/reloc.sh `pwd`".format(server.ssh_username)
    80  command4 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -- -noinput -detached".format(server.ssh_username)
    81  command5 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(serve [all...]

warmupcluster.py
    65  msg="unable to create {0} bucket".format(name))
    71  msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
    78  msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
    85  items = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, howmany)]
    89  self.log.info("inserted {0} items".format(howmany))
    109  map["{0}:{1}".format(server.ip, server.port)] = {}
    110  map["{0}:{1}".format(server.ip, server.port)]["curr_items_tot"] = mc_conn.stats("")["curr_items_tot"]
    111  map["{0}:{1}".format(server.ip, server.port)]["previous_uptime"] = mc_conn.stats("")["uptime"]
    114  "memcached {0}:{1} has {2} items".format(server.ip, server.port, mc_conn.stats("")["curr_items_tot"]))
    126  command = "os:cmd(\"kill -9 {0} \")".format(pi [all...]

/6.0.3/testrunner/pytests/sg/
sg_config_base.py
    50  template = env.get_template('{0}'.format(template_filename))
    52  password_str = urllib.quote('{0}'.format(self.bucket)) +':{0}@'.format(self.password)
    85  self.config = '/tmp/{0}'.format(self.config)
    86  output, error = shell.execute_command('cat {0}'.format(self.config))
    90  .format(self.param, self.config))
    95  self.log.info('Sync Gateway is running with pid of {0}'.format(obj.pid))
    109  self.log.info('=== start_sync_gateway_template - template file {0}'.format(template))
    112  self.generate_sync_gateways_config(template, 'pytests/sg/resources/{0}'.format(self.config))
    122  self.log.info('No output from issuing {0}'.format(cm [all...]

sg_webhook_base.py
    26  shell.execute_command("rm -rf {0}/tmp/*".format(self.folder_prefix))
    31  exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
    39  self.log.info('=== start_sync_gateway with config file {0}.'.format(config_filename))
    40  output, error = shell.execute_command('cat {0}/tmp/{1}'.format(self.folder_prefix, config_filename))
    46  format(self.installed_folder, config_filename,
    50  ' /tmp/{0} >/tmp/gateway.log 2>&1 &'.format(config_filename))
    54  self.log.info('start_sync_gateway - Sync Gateway is running with pid of {0}'.format(obj.pid))
    55  if not shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log'):
    62  output, error = shell.execute_command('cat {0}/tmp/gateway.log'.format(self.folder_prefix))
    68  cmd = 'curl -X {0} http://{1}:4984/db/{2}{3}'.format(metho [all...]

sg_base.py
    58  self.log.info('Trying to get "{0}"'.format(url))
    64  self.log.info('Found "{0}"'.format(url))
    83  url = '{0}/{1}'.format(location, pattern.format(self.version, self.info.architecture_type, file_ext))
    111  self.log.info('uninstall_gateway is not supported on {0}, {1}'.format(type, distribution_type))
    112  self.log.info('=== Un-installing Sync Gateway package on {0} - cmd: {1}'.format(self.master, cmd))
    117  self.log.info('=== Un-installing Sync Gateway package on {0} - cmd: {1}'.format(self.master, cmd))
    128  cmd = 'yes | rpm -i /tmp/{0}'.format(filename)
    130  cmd = 'yes | dpkg -i /tmp/{0}'.format(filename)
    134  .format(filenam [all...]

/6.0.3/testrunner/scripts/
getcoredumps.py
    47  print "looking for crashes on {0} ... ".format(info.ip)
    48  print "erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type)
    49  core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
    50  print "core* files under /opt/{0}/var/lib/{0}/".format(server_type)
    51  core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
    54  print "breakpad *dmp files under /opt/{0}/var/lib/{0}/".format(server_type)
    55  core_files.extend(remote.file_ends_with("/opt/{0}/var/lib/{0}/".format(server_type), ".dmp"))
    57  print "found crashes on {0}: {1}".format(info.ip, core_files)
    59  print "crashes not found on {0}".format(info.ip)
    64  erl_crash_file_name = "erlang-{0}-{1}.log".format(sel [all...]

grabdiags.py
    23  print "grabbing diags from ".format(serverInfo.ip)
    24  diag_url = "http://{0}:{1}/diag".format(serverInfo.ip,serverInfo.port)
    30  filename = "{0}-{1}-diag.txt".format(serverInfo.ip,serverInfo.port)
    33  os.write(1, "downloading {0} ...".format(serverInfo.ip))
    40  file_input = open('{0}'.format(filename), 'rb')
    41  zipped = gzip.open("{0}.gz".format(filename), 'wb')
    47  print "downloaded and zipped diags @ : {0}".format("{0}.gz".format(filename))
    49  print "unable to obtain diags from {0}".format(diag_url)
    51  print "unable to obtain diags from {0}".format(diag_ur [all...]

/6.0.3/testrunner/pytests/tuqquery/
n1ql_aggregate_pushdown_array.py
    51  query_definitions = [select_non_array_where_clause.format(non_array_first_field["name"],
    54  select_array_where_clause.format(non_array_first_field["name"],
    59  query_definitions = [select_non_array_where_clause.format(non_array_first_field["name"],
    62  select_non_array_where_clause.format(non_array_first_field["name"],
    65  select_array_where_clause.format(non_array_first_field["name"],
    68  select_array_where_clause.format(non_array_first_field["name"],
    71  select_non_array_where_clause.format(non_array_first_field["name"],
    74  select_non_array_where_clause.format(non_array_first_field["name"],
    90  log.info("Query {0} failed in explain".format(query))
    100  "Aggregate Pushdown Query Results fails: {0}".format(failed_queries_in_resul [all...]

date_time_functions.py
    54  expected_utc_query = 'SELECT DATE_PART_STR(MILLIS_TO_UTC({0}), "{1}")'.format(milliseconds, part)
    61  {3} parts".format(actual_utc_result["results"][0], expected_utc_result["results"][0]["$1"],
    63  expected_local_query = 'SELECT DATE_PART_STR(MILLIS_TO_STR({0}), "{1}")'.format(milliseconds, part)
    70  {3} parts".format(actual_local_result["results"][0], expected_local_result["results"][0]["$1"],
    77  query = 'SELECT DATE PART_MILLIS({0}, "{1}")'.format(expression, part)
    97  date_format_query = 'DATE_FORMAT_STR("{0}", "{1}")'.format(expression, expected_format)
    100  query = 'SELECT LENGTH("{0}")'.format(expected_format)
    103  "Results mismatch for query {0}".format(date_format_query))
    123  query = 'SELECT DATE_ADD_STR("{0}", {1}, "{2}")'.format(first_expression, count, part)
    126  self.assertIsNotNone(temp_expression, "result is {0} for query {1}".format(expected_resul [all...]

upgrade_n1qlrbac.py
    41  self.query = 'create index idx on {0}(meta().id)'.format(bucket.name)
    56  self.query = "GRANT {0} to {1}".format("admin",'john')
    67  self.query = "GRANT {0} to {1}".format("cluster_admin",'johnClusterAdmin')
    73  self.query = "GRANT {0} on {2} to {1}".format("bucket_admin",'standard_bucket0','standard_bucket0')
    80  format('johnClusterAdmin','password', self.master.ip, bucket.name,self.curl_path)
    85  format(bucket.name, 'johnClusterAdmin'))
    89  format('john','password', self.master.ip, bucket.name,self.curl_path)
    93  format(bucket.name, 'john_admin'))
    96  format('standard_bucket0','password', self.master.ip, bucket.name,self.curl_path)
    100  format(bucke [all...]

/6.0.3/testrunner/pysystests/
runsystest.py
    24  print("Successfully SSHed to {0}".format(ip))
    38  print ("SFTPing to {0}".format(ip))
    47  print "Killing {0}".format(process_name)
    48  _, stdout, _ = ssh_client.exec_command("pgrep -f {0}".format(process_name))
    50  ssh_client.exec_command("kill -9 {0}".format(pid.split()[0]))
    54  print ("Starting {0}...".format(process_name))
    57  _, stdout, _ = ssh_client.exec_command("pgrep {0}".format(process_name))
    58  print ("{0} is running with pid {1}".format(process_name, stdout.readlines()[0]))
    64  print("\n##### Setting up RabbitMQ @ {0} #####".format(cfg.RABBITMQ_IP))
    72  rabbitmq_client.exec_command("sudo kill -9 {0}".format(pi [all...]

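The runsystest.py hits follow the standard paramiko idiom: exec_command returns an (stdin, stdout, stderr) tuple, pgrep locates the remote process, and kill -9 stops it. A minimal sketch of that kill helper, assuming a generic paramiko client; the host, credentials, and process name below are illustrative, not taken from the script:

```python
import paramiko


def kill_process(ssh_client, process_name):
    """Look up a remote process with pgrep and kill -9 each matching pid."""
    print("Killing {0}".format(process_name))
    _, stdout, _ = ssh_client.exec_command("pgrep -f {0}".format(process_name))
    for pid in stdout.readlines():
        ssh_client.exec_command("kill -9 {0}".format(pid.split()[0]))


client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("10.1.1.1", username="root", password="couchbase")  # illustrative host/credentials
kill_process(client, "memcached")
client.close()
```
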
/6.0.3/goproj/src/github.com/couchbase/indexing/secondary/logging/ |
logging.go
    35  Warnf(format string, v ...interface{})
    37  Errorf(format string, v ...interface{})
    39  Fatalf(format string, v ...interface{})
    41  Infof(format string, v ...interface{})
    43  Verbosef(format string, v ...interface{})
    47  Timer(format string, v ...interface{}) Ender
    49  Debugf(format string, v ...interface{})
    51  Tracef(format string, v ...interface{})
    126  func (log *destination) Warnf(format string, v ...interface{}) {
    127  log.printf(Warn, format, [all...]

/6.0.3/godeps/src/github.com/couchbase/goforestdb/ |
log.go
    26  Warnf(format string, v ...interface{})
    28  Errorf(format string, v ...interface{})
    30  Fatalf(format string, v ...interface{})
    32  Infof(format string, v ...interface{})
    34  Debugf(format string, v ...interface{})
    36  Tracef(format string, v ...interface{})
    79  func (l *LeveledLog) Fatalf(format string, a ...interface{}) {
    81  log.Fatalf(format, a...)
    85  func (l *LeveledLog) Errorf(format string, a ...interface{}) {
    87  log.Printf(format, [all...]

/6.0.3/testrunner/pytests/2i/ |
aggregate_pushdown_upgrade_2i.py
    38  where_clauses = ["int_num = {0}".format(random.randint(-100, 100)),
    39  "int_num > {0}".format(random.randint(-100, 100)),
    40  "int_num < {0}".format(random.randint(-100, 100)),
    41  "int_num > {0} AND int_num < {1}".format(random.randint(-100, 0), random.randint(0, 100)),
    42  "int_num BETWEEN {0} AND {1}".format(random.randint(-100, 0), random.randint(0, 100))]
    51  "UNNEST d.`int_arr` AS t where t = {0}".format(random.randint(-100, 100)),
    52  "UNNEST d.`int_arr` AS t where t > {0}".format(random.randint(-100, 100)),
    53  "UNNEST d.`int_arr` AS t where t < {0}".format(random.randint(-100, 100)),
    54  "UNNEST d.`int_arr` AS t where t > {0} and t < {1}".format(random.randint(-100, 0), random.randint(0, 100)),
    55  "UNNEST d.`int_arr` AS t where t between {0} and {1}".format(rando [all...]

/6.0.3/godeps/src/github.com/couchbase/gocb/ |
logging.go
    55  Log(level LogLevel, offset int, format string, v ...interface{}) error
    66  func (wrapper coreLogWrapper) Log(level LogLevel, offset int, format string, v ...interface{}) error {
    67  return wrapper.wrapped.Log(gocbcore.LogLevel(level), offset+2, format, v...)
    92  func (wrapper coreLogger) Log(level gocbcore.LogLevel, offset int, format string, v ...interface{}) error {
    93  return wrapper.wrapped.Log(LogLevel(level), offset+2, format, v...)
    115  func logExf(level LogLevel, offset int, format string, v ...interface{}) {
    117  err := globalLogger.Log(level, offset+1, format, v...)
    124  func logInfof(format string, v ...interface{}) {
    125  logExf(LogInfo, 1, format, v...)
    128  func logDebugf(format strin [all...]

/6.0.3/testrunner/lib/membase/helper/ |
rebalance_helper.py
    21  log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
    51  log.info("{0} : {1}".format(stat_key, actual_stat_value))
    56  log.info("{0} : {1}".format(stat_key, actual_stat_value))
    66  log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
    105  log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
    116  log.info("{0} : {1}".format(stat_key, stats[stat_key]))
    122  log.info("{0} : {1}".format(stat_key, stats[stat_key]))
    132  log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
    142  log.info("unable to collect stats from server {0}".format(master))
    152  log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucke [all...]

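Every rebalance_helper.py hit above comes from the same polling idiom: read a bucket stat, log it, and give up if the expected value is not reached within a timeout. A generic sketch of that loop, assuming a caller-supplied get_stat callable in place of the helper's actual REST/memcached stat readers:

```python
import time


def wait_for_stat(get_stat, stat_key, expected_value,
                  timeout_in_seconds=120, poll_interval=2):
    """Poll get_stat(stat_key) until it returns expected_value or the
    timeout expires, logging each observed value along the way."""
    actual = None
    end_time = time.time() + timeout_in_seconds
    while time.time() < end_time:
        actual = get_stat(stat_key)
        print("{0} : {1}".format(stat_key, actual))
        if actual == expected_value:
            return True
        time.sleep(poll_interval)
    print("stat {0} did not reach expected value after {1} seconds (value = {2})".format(
        stat_key, timeout_in_seconds, actual))
    return False
```
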
/6.0.3/testrunner/lib/couchbase_helper/ |
query_definitions.py
    55  query = "CREATE INDEX {0} ON {1}({2})".format(self.index_name, bucket, ",".join(self.index_fields))
    65  query = "CREATE INDEX {0} ON {1}({2})".format(self.index_name, bucket, collations)
    93  ind_content["bucket"] = "{0}".format(bucket)
    96  ind_content["exprType"] = "{0}".format(expr_type)
    110  query = "DROP PRIMARY INDEX ON {0}".format(bucket)
    140  query_template = FULL_SCAN_TEMPLATE.format("*","name IS NOT NULL"),
    160  query_template = RANGE_SCAN_ORDER_BY_TEMPLATE.format(emit_fields,"job_title IS NOT NULL","job_title,_id"),
    166  query_template = RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % "job_title = \"Sales\""),
    172  query_template = RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % "job_title = \"Sales\" ORDER BY job_title "),
    178  query_template = RANGE_SCAN_TEMPLATE.format(emit_field [all...]

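query_definitions.py builds its N1QL strings in two layers: str.format for the index DDL, and scan templates whose WHERE clause is first wrapped via old-style " %s " substitution before being formatted in. A small sketch of that construction; the template constants below are simplified stand-ins, not the module's actual FULL_SCAN_TEMPLATE / RANGE_SCAN_TEMPLATE values:

```python
# Simplified stand-ins for the module's template constants (assumption).
CREATE_INDEX_TEMPLATE = "CREATE INDEX {0} ON {1}({2})"
RANGE_SCAN_TEMPLATE = "SELECT {0} FROM default WHERE{1}"

create_query = CREATE_INDEX_TEMPLATE.format(
    "idx_job_title", "default", ",".join(["job_title", "join_yr"]))
# The " %s " % ... step pads the predicate with spaces before it is formatted in.
scan_query = RANGE_SCAN_TEMPLATE.format("name, job_title", " %s " % "job_title = \"Sales\"")

print(create_query)  # CREATE INDEX idx_job_title ON default(job_title,join_yr)
print(scan_query)    # SELECT name, job_title FROM default WHERE job_title = "Sales"
```
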
/6.0.3/godeps/src/github.com/couchbase/clog/ |
clog.go
    44  var logCallBack func(level, format string, args ...interface{}) string
    137  func SetLoggerCallback(k func(level, format string, args ...interface{}) string) {
    231  func To(key string, format string, args ...interface{}) {
    234  str := logCallBack("INFO", format, args...)
    239  logger.Printf(fgYellow+key+": "+reset+format, args...)
    245  func Log(format string, args ...interface{}) {
    248  str := logCallBack("INFO", format, args...)
    253  logger.Printf(format, args...)
    259  func Printf(format string, args ...interface{}) {
    262  str := logCallBack("INFO", format, arg [all...]

/6.0.3/testrunner/lib/ |
sdk_client.py
    39  connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ",""))
    41  # connection_string = "{0}/{1}".format(connection_string, bucket)
    43  connection_string = "{0}?{1}".format(connection_string, uhm_options)
    46  connection_string = "{0},ipv6=allow".format(connection_string)
    48  connection_string = "{0}?ipv6=allow".format(connection_string)
    51  connection_string = "{0},compression=on".format(connection_string)
    53  connection_string = "{0}?compression=on".format(connection_string)
    56  connection_string = "{0},compression=off".format(connection_string)
    58  connection_string = "{0}?compression=off".format(connection_string)
    61  connection_string = "{0},certpath={1}".format(connection_strin [all...]

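The sdk_client.py hits all implement one rule: append a libcouchbase-style connection-string option with "?" if it is the first option and "," otherwise (the ipv6, compression, and certpath branches above). A minimal sketch of that rule; the helper name and option set are illustrative, not the module's API:

```python
def build_connection_string(scheme, hosts, **options):
    """Build a couchbase-style connection string, introducing the first
    option with '?' and each subsequent option with ','."""
    connection_string = "{0}://{1}".format(scheme, ",".join(hosts))
    for key, value in options.items():
        separator = "," if "?" in connection_string else "?"
        connection_string = "{0}{1}{2}={3}".format(connection_string, separator, key, value)
    return connection_string


print(build_connection_string("couchbase", ["10.1.1.1", "10.1.1.2"],
                              ipv6="allow", compression="on"))
# couchbase://10.1.1.1,10.1.1.2?ipv6=allow,compression=on
```
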
/6.0.3/testrunner/pytests/cbas/ |
cbas_rbac.py
    144  "=== Operation {0} failed for user {1} while it should have worked".format(
    149  "Operation : {0}, User : {1} = Works as expected".format(
    156  "=== Operation {0} worked for user {1} while it should not have worked".format(
    161  "Operation : {0}, User : {1} = Works as expected".format(
    228  "^^^^^^^^^^^^^^ Status of drop bucket for user {0}: {1}".format(
    239  create_idx_statement = "create index idx1 on {0}(city:String);".format(
    246  drop_idx_statement = "drop index {0}.idx1;".format(
    256  create_idx_statement = "create index idx1 on {0}(city:String);".format(
    260  drop_idx_statement = "drop index {0}.idx1;".format(
    267  drop_idx_statement = "drop index {0}.idx1;".format( [all...]

/6.0.3/godeps/src/gopkg.in/couchbase/gocbcore.v7/ |
logging.go
    52  Log(level LogLevel, offset int, format string, v ...interface{}) error
    60  func (l *defaultLogger) Log(level LogLevel, offset int, format string, v ...interface{}) error {
    64  s := fmt.Sprintf(format, v...)
    110  func logExf(level LogLevel, offset int, format string, v ...interface{}) {
    112  err := globalLogger.Log(level, offset+1, format, v...)
    119  func logDebugf(format string, v ...interface{}) {
    120  logExf(LogDebug, 1, format, v...)
    123  func logSchedf(format string, v ...interface{}) {
    124  logExf(LogSched, 1, format, v...)
    127  func logWarnf(format strin [all...]

/6.0.3/testrunner/pytests/epengine/ |
opschangecas.py
    44  #print 'vbucket_id is {0}'.format(vbucket_id)
    50  #print 'cas_a {0} '.format(cas_active)
    54  self.assertTrue(cas_active == max_cas, '[ERROR]Max cas is not 0 it is {0}'.format(cas_active))
    64  #print 'replica CAS {0}'.format(replica_CAS)
    65  #print 'replica ext meta {0}'.format(get_meta_resp)
    77  #print 'active cas {0}'.format(active_CAS)
    79  self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
    93  #print 'vbucket_id is {0}'.format(vbucket_id)
    99  #print 'cas_a {0} '.format(cas_active)
    103  self.assertTrue(cas_active == max_cas, '[ERROR]Max cas is not 0 it is {0}'.format(cas_activ [all...]

/6.0.3/ns_server/ |
cluster_connect
    114  "deployment plan {5}\n".format(nodes, buckettype,
    122  print "Connecting node 0 with services {0}".format(str(services))
    123  o.open("http://{0}:{1}/node/controller/setupServices".format(addr, base_port),
    124  "services={0}".format(",".join(services))).read()
    125  o.open("http://{0}:{1}/pools/default".format(addr, base_port),
    128  o.open("http://{0}:{1}/pools/default/buckets".format(addr, base_port),
    136  o.open("http://{0}:{1}/settings/web".format(addr, base_port),
    143  print "Connecting node {0} with services {1}".format(i, str(services))
    144  o.open("http://{0}:{1}/node/controller/doJoinCluster".format(addr, port),
    146  "clusterMemberHostIp={0}".format(add [all...]

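The cluster_connect hits trace the usual dev-cluster bootstrap against the ns_server REST API: POST the service list to /node/controller/setupServices, configure /pools/default, create a bucket via /pools/default/buckets, set credentials through /settings/web, then join further nodes with /node/controller/doJoinCluster. A rough sketch of the first few calls, assuming urllib for the form-encoded POSTs; apart from the services=... payload shown in the hits, the field names, quota, and credentials below are assumptions, not taken from the script:

```python
import urllib.parse
import urllib.request


def post(url, params):
    """POST form-encoded params, the way cluster_connect drives its URL opener."""
    data = urllib.parse.urlencode(params).encode()
    return urllib.request.urlopen(url, data).read()


addr, base_port = "127.0.0.1", 9000
services = ["kv", "n1ql", "index"]

# Declare services on node 0, then configure the default pool and admin credentials.
post("http://{0}:{1}/node/controller/setupServices".format(addr, base_port),
     {"services": ",".join(services)})
post("http://{0}:{1}/pools/default".format(addr, base_port),
     {"memoryQuota": 512})                       # quota value is illustrative
post("http://{0}:{1}/settings/web".format(addr, base_port),
     {"username": "Administrator", "password": "password", "port": "SAME"})
```
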
/6.0.3/testrunner/pytests/security/ |
portscan.py
    86  self.log.info('{0} Testing port {1}'.format(s,i))
    87  cmd = self.TEST_SSL_FILENAME + ' -p --warnings off --color 0 {0}:{1}'.format( s.ip, i)
    88  self.log.info('The command is {0}'.format( cmd ) )
    97  msg='TLS 1.1 is incorrect enabled on port {0}'.format(i))
    100  msg='TLS 1 is incorrect enabled on port {0}'.format(i))
    140  # Note this analysis is very coupled to the testssl.sh output format. If it changes this code may break.
    146  self.log.info('Testing port {0}'.format(i))
    147  cmd = self.TEST_SSL_FILENAME + ' --warnings off --color 0 {0}:{1}'.format( self.master.ip, i)
    155  self.log.error('Heartbleed vulnerability on port {0}'.format(i))
    162  self.log.error('Poodle vulnerability on port {0}'.format( [all...]

/6.0.3/kv_engine/scripts/ |
get_cbmonitor_data.py
    40  # data format: [[timestamp1,value1],[timestamp2,value2],..]
    176  # url format: [host + "/" + "<dataset>" + snapshot [+ "<ip/bucket>"] +
    229  ('ops', '{:.2f}'.format(getAverage(ops))),
    230  ('latency_set (ms)', '{:.2f}'.format(getAverage(latency_set))
    232  ('latency_get (ms)', '{:.2f}'.format(getAverage(latency_get))
    234  ('latency_set P99 (ms)', '{:.2f}'.format(getP99(latency_set))
    236  ('latency_get P99 (ms)', '{:.2f}'.format(getP99(latency_get))
    239  .format(getAverage(avg_disk_commit_time) *
    241  ('avg_bg_wait_time (ms)', '{:.2f}'.format(getAverage(avg_bg_wait_time) /
    244  .format(getP9 [all...]

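get_cbmonitor_data.py reduces each cbmonitor series (the [[timestamp, value], ...] format noted in its line-40 comment) to an average or a P99 and prints it with '{:.2f}'. The script's own getAverage/getP99 implementations are not visible in these hits, so the versions below are assumptions (simple mean and nearest-rank percentile) for illustration only:

```python
def get_average(series):
    """Mean of a cbmonitor series in the [[timestamp, value], ...] format."""
    values = [value for _, value in series]
    return sum(values) / len(values) if values else 0.0


def get_p99(series):
    """99th-percentile value of the series (nearest-rank, an assumed method)."""
    values = sorted(value for _, value in series)
    if not values:
        return 0.0
    rank = max(int(round(0.99 * len(values))) - 1, 0)
    return values[rank]


latency_set = [[1000, 1.2], [1001, 1.9], [1002, 1.4]]
print('latency_set (ms): {:.2f}'.format(get_average(latency_set)))   # latency_set (ms): 1.50
print('latency_set P99 (ms): {:.2f}'.format(get_p99(latency_set)))   # latency_set P99 (ms): 1.90
```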