# WildFly Toolbelt
Basically, you need to open domain.xml
under WILDFLY_HOME/domain/configuration
and find the <deployments>
section. In that section, each deployment contains a <content>
tag. Its sha1 attribute
indicates where the file is stored on the filesystem. E.g.:
<deployments>
# Install gosu. https://github.com/tianon/gosu
ENV GOSU_VERSION=1.11
RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \
&& curl -o /usr/local/bin/gosu -SL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-amd64" \
&& curl -o /usr/local/bin/gosu.asc -SL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-amd64.asc" \
&& gpg --verify /usr/local/bin/gosu.asc \
&& rm /usr/local/bin/gosu.asc \
&& rm -r /root/.gnupg/ \
&& chmod +x /usr/local/bin/gosu \
# switch the log level to DEBUG on console
{wildfly}/bin/jboss-cli.sh --connect
[standalone@localhost:9990 /] /subsystem=logging/console-handler=CONSOLE:write-attribute(name=level,value=DEBUG)
[standalone@localhost:9990 /] /subsystem=logging/root-logger=ROOT:write-attribute(name=level,value=DEBUG)
# switch it back to the initial configuration (here it is INFO)
[standalone@localhost:9990 /] /subsystem=logging/console-handler=CONSOLE:write-attribute(name=level,value=INFO)
[standalone@localhost:9990 /] /subsystem=logging/root-logger=ROOT:write-attribute(name=level,value=INFO)
memuse measures the total unique physical memory taken by a process and its children, ignoring duplicate copy-on-write pages and shared memory.
This is a solution for http://serverfault.com/questions/676335/how-measure-memory-without-copy-on-write-pages
It's a quick and dirty utility, but feel free to fork & improve.
Example:
~ » sudo ./memuse.py 15897 eugene@eugene-thinkpad
PID Commandline Frames (+unique) VMEM
#!/bin/bash
#
# O pgbadger no modo incremental gera um indice (arquivos .bin)
# para fazer o parser do log, e não possui um mecanismo interno
# de limpeza de arquivos que não estão mais em uso.
#
# Este script efetua a limpeza dos .bin obsoletos, ou seja,
# aqueles que são de dias anteriores aos correspondentes a
# semana corrente.
WITH btree_index_atts AS (
SELECT nspname, relname, reltuples, relpages, indrelid, relam,
regexp_split_to_table(indkey::text, ' ')::smallint AS attnum,
indexrelid as index_oid
FROM pg_index
JOIN pg_class ON pg_class.oid=pg_index.indexrelid
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
JOIN pg_am ON pg_class.relam = pg_am.oid
WHERE pg_am.amname = 'btree'
),
// You can edit this code!
// Click here and start typing.
package main
import "fmt"
import "runtime"
import "strings"
func identifyPanic() string {
var name, file string
CREATE OR REPLACE FUNCTION public.json_append(data json, insert_data json)
RETURNS json
IMMUTABLE
LANGUAGE sql
AS $$
SELECT ('{'||string_agg(to_json(key)||':'||value, ',')||'}')::json
FROM (
SELECT * FROM json_each(data)
UNION ALL
SELECT * FROM json_each(insert_data)
Moved to git repository: https://github.com/denji/nginx-tuning
For this configuration you can use any web server you like; I decided to use nginx because I work mostly with it.
Generally, a properly configured nginx can handle up to 400K to 500K requests per second (clustered); the most I have seen is 50K to 80K (non-clustered) requests per second at 30% CPU load — of course, this was on 2 x Intel Xeon
with HyperThreading enabled, but it can work without problems on slower machines.
You must understand that this config is used in a testing environment and not in production, so you will need to find a way to implement most of these features in the best possible way for your servers.