Skip to content

Instantly share code, notes, and snippets.

@Karm
Created October 7, 2014 14:02
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Karm/b1ad3704846a722f9eec to your computer and use it in GitHub Desktop.
Save Karm/b1ad3704846a722f9eec to your computer and use it in GitHub Desktop.
MODCLUSTER-430 pastebin...
mod_manager.c(1907): manager_trans INFO (/)
mod_manager.c(2623): manager_handler INFO (/) processing: ""
mod_manager.c(2672): manager_handler INFO OK
mod_manager.c(1907): manager_trans CONFIG (/)
mod_manager.c(2623): manager_handler CONFIG (/) processing: "JVMRoute=jboss-eap-6.3-2&Host=192.168.122.204&Maxattempts=1&Port=8110&StickySessionForce=No&Type=ajp&ping=10"
mod_manager.c(2672): manager_handler CONFIG OK
mod_manager.c(1907): manager_trans ENABLE-APP (/)
mod_manager.c(2623): manager_handler ENABLE-APP (/) processing: "JVMRoute=jboss-eap-6.3-2&Alias=default-host%2Clocalhost%2Cexample.com&Context=%2Fclusterbench"
mod_manager.c(1347): process_appl_cmd: adding vhost: 1 node: 1
mod_manager.c(2672): manager_handler ENABLE-APP OK
mod_manager.c(1907): manager_trans STATUS (/)
mod_manager.c(2623): manager_handler STATUS (/) processing: "JVMRoute=jboss-eap-6.3-2&Load=100"
mod_manager.c(1622): Processing STATUS
mod_proxy_cluster.c(785): KARM node->mess.balancer=qacluster, node->mess.Host=192.168.122.204, node->mess.Port=8110, name=balancer://qacluster
mod_proxy_cluster.c(787): KARM balancer=0, creat_bal=2, s=7fb8a474a880, s->server_hostname:port=192.168.122.204:2182, s->server_scheme=(null), s->defn_name=(null), s->is_virtual=0 main_server=7fb8a474a880, main_server->server_hostname:port=192.168.122.204:2182, main_server->server_scheme=(null), main_server->defn_name=(null), main_server->is_virtual=0
mod_proxy_cluster.c(794): KARM GONNA U S E this one s=7fb8a474a880... creat_bal=2
mod_proxy_cluster.c(679): add_balancer_node: Create balancer balancer://qacluster
mod_proxy_cluster.c(445): Created: worker for ajp://192.168.122.204:8110
mod_proxy_cluster.c(572): proxy: initialized single connection worker 1 in child 29480 for (192.168.122.204)
mod_proxy_cluster.c(625): Created: worker for ajp://192.168.122.204:8110 1 (status): 1
mod_proxy_cluster.c(785): KARM node->mess.balancer=qacluster, node->mess.Host=192.168.122.204, node->mess.Port=8110, name=balancer://qacluster
mod_proxy_cluster.c(787): KARM balancer=7fb8a47b3f50, creat_bal=2, s=7fb8a48cb278, s->server_hostname:port=192.168.122.204:2181, s->server_scheme=(null), s->defn_name=/dev/shm/mod_cluster-eapx/jboss-ews-2.1/httpd/conf.d/mod_cluster.conf, s->is_virtual=1 main_server=7fb8a474a880, main_server->server_hostname:port=192.168.122.204:2182, main_server->server_scheme=(null), main_server->defn_name=(null), main_server->is_virtual=0
mod_proxy_cluster.c(790): KARM GONNA S K I P this one s=7fb8a48cb278... creat_bal=2
mod_proxy_cluster.c(785): KARM node->mess.balancer=qacluster, node->mess.Host=192.168.122.204, node->mess.Port=8110, name=balancer://qacluster
mod_proxy_cluster.c(787): KARM balancer=7fb8a47b3f50, creat_bal=2, s=7fb8a4755c50, s->server_hostname:port=192.168.122.204:2180, s->server_scheme=(null), s->defn_name=/dev/shm/mod_cluster-eapx/jboss-ews-2.1/httpd/conf.d/mod_cluster.conf, s->is_virtual=1 main_server=7fb8a474a880, main_server->server_hostname:port=192.168.122.204:2182, main_server->server_scheme=(null), main_server->defn_name=(null), main_server->is_virtual=0
mod_proxy_cluster.c(790): KARM GONNA S K I P this one s=7fb8a4755c50... creat_bal=2
mod_proxy_cluster.c(785): KARM node->mess.balancer=qacluster, node->mess.Host=192.168.122.204, node->mess.Port=8110, name=balancer://qacluster
mod_proxy_cluster.c(787): KARM balancer=7fb8a47b3f50, creat_bal=2, s=7fb8a48cb070, s->server_hostname:port=192.168.122.204:8847, s->server_scheme=(null), s->defn_name=/dev/shm/mod_cluster-eapx/jboss-ews-2.1/httpd/conf.d/mod_cluster.conf, s->is_virtual=1 main_server=7fb8a474a880, main_server->server_hostname:port=192.168.122.204:2182, main_server->server_scheme=(null), main_server->defn_name=(null), main_server->is_virtual=0
mod_proxy_cluster.c(790): KARM GONNA S K I P this one s=7fb8a48cb070... creat_bal=2
proxy_util.c(2018): proxy: ajp: has acquired connection for (192.168.122.204)
proxy_util.c(2074): proxy: connecting ajp://192.168.122.204:8110/ to 192.168.122.204:8110
proxy_util.c(2200): proxy: connected / to 192.168.122.204:8110
proxy_util.c(2451): proxy: ajp: fam 2 socket created to connect to 192.168.122.204
mod_proxy_cluster.c(1392): proxy_cluster_try_pingpong: connected to backend
mod_proxy_cluster.c(1116): ajp_cping_cpong: Done
proxy_util.c(2036): proxy: ajp: has released connection for (192.168.122.204)
mod_manager.c(2672): manager_handler STATUS OK
# These are THE ONLY Listen directives, there is nothing in httpd.conf
CreateBalancers 2
# My Main Server
Listen 192.168.122.204:2182
ServerName 192.168.122.204:2182
DocumentRoot www/main_server/
# My first VirtualHost
Listen 192.168.122.204:8847
<VirtualHost 192.168.122.204:8847>
ServerName 192.168.122.204:8847
DocumentRoot www/vh1
<Directory />
Order deny,allow
Deny from all
Allow from all
</Directory>
KeepAliveTimeout 60
MaxKeepAliveRequests 0
ServerAdvertise on
AdvertiseFrequency 5
ManagerBalancerName qacluster
AdvertiseGroup 224.0.5.79:65009
EnableMCPMReceive
<Location /mcm>
SetHandler mod_cluster-manager
Order deny,allow
Deny from all
Allow from all
</Location>
</VirtualHost>
# My second VirtualHost
Listen 192.168.122.204:2180
<VirtualHost 192.168.122.204:2180>
ServerName 192.168.122.204:2180
DocumentRoot www/vh2
</VirtualHost>
# My third VirtualHost
Listen 192.168.122.204:2181
<VirtualHost 192.168.122.204:2181>
ServerName 192.168.122.204:2181
DocumentRoot www/vh2
</VirtualHost>
+++ b/native/mod_proxy_cluster/mod_proxy_cluster.c
@@ -780,11 +780,18 @@ static void add_balancers_workers(nodeinfo_t *node, apr_pool_t *pool)
#else
proxy_balancer *balancer = ap_proxy_get_balancer(pool, conf, name);
#endif
+ /*KARM: Here we go --> MODCLUSTER-430 */
- if (!balancer && (creat_bal == CREAT_NONE ||
- (creat_bal == CREAT_ROOT && s!=main_server))) {
+ /*KARM*/ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "KARM node->mess.balancer=%s, node->mess.Host=%s, node->mess.Port=%s, name=%s", node->mess.balancer, node->mess.Host, node->mess.Port, name);
+
+ /*KARM*/ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "KARM balancer=%pp, creat_bal=%d, s=%pp, s->server_hostname:port=%s:%d, s->server_scheme=%s, s->defn_name=%s, s->is_virtual=%d main_server=%pp, main_server->server_hostname:port=%s:%d, main_server->server_scheme=%s, main_server->defn_name=%s, main_server->is_virtual=%d",
+ balancer, creat_bal, s, s->server_hostname, s->port, s->server_scheme, s->defn_name, s->is_virtual, main_server, main_server->server_hostname, main_server->port, main_server->server_scheme, main_server->defn_name, main_server->is_virtual);
+ if (creat_bal == CREAT_NONE || (creat_bal == CREAT_ROOT && s != main_server)) {
+ /*KARM*/ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "KARM GONNA S K I P this one s=%pp... creat_bal=%d", s, creat_bal);
s = s->next;
continue;
+ } else {
+ /*KARM*/ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "KARM GONNA U S E this one s=%pp... creat_bal=%d", s, creat_bal);
}
if (!balancer)
balancer = add_balancer_node(node, conf, pool, s);
@@ -850,8 +857,9 @@ static void add_balancers_workers(nodeinfo_t *node, apr_pool_t *pool)
}
#endif
}
- if (balancer)
+ if (balancer) {
create_worker(conf, balancer, s, node, pool);
+ }
s = s->next;
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment