I tracked down the path, which is confirmed by this trace. From the logs
it can be seen that it is just the next chunk in memory.
Post by 张顺: 4.2.0 has a core
#0 0x00007ffff7488265 in raise () from /lib64/libc.so.6
No symbol table info available.
#1 0x00007ffff7489d10 in abort () from /lib64/libc.so.6
No symbol table info available.
#2 0x0000000000615b74 in qm_debug_frag (qm=0x7ffff6c55010,
f=0x7ffff6cdcf08) at mem/q_malloc.c:161
__FUNCTION__ = "qm_debug_frag"
#3 0x0000000000616d87 in qm_malloc (qm=0x7ffff6c55010, size=56,
file=0x7ffff5fedef8 "<core>: db_res.c", func=0x7ffff5fee288
"db_new_result", line=112)
at mem/q_malloc.c:388
f = 0x7ffff6cdcf08
hash = 2057
list_cntr = 1
__FUNCTION__ = "qm_malloc"
#4 0x00007ffff5fdaa2f in db_new_result () at db_res.c:112
r = 0x0
__FUNCTION__ = "db_new_result"
#5 0x00007ffff6a386d6 in db_mysql_new_result () at km_res.c:235
obj = 0x0
__FUNCTION__ = "db_mysql_new_result"
#6 0x00007ffff6a29c7a in db_mysql_store_result (_h=0x7ffff6cb4e78,
_r=0x7fffffffdf38) at km_dbase.c:227
code = -165731152
__FUNCTION__ = "db_mysql_store_result"
#7 0x00007ffff5fd3516 in db_do_query_internal (_h=0x7ffff6cb4e78,
_k=0x7fffffffdf70, _op=0x0, _v=0x7fffffffdf40, _c=0x7fffffffdf60,
_n=1, _nc=1, _o=0x0,
_r=0x7fffffffdf38, val2str=0x7ffff6a39374 <db_mysql_val2str>,
submit_query=0x7ffff6a2825c <db_mysql_submit_query>,
store_result=0x7ffff6a2998c <db_mysql_store_result>, _l=0) at
db_query.c:137
tmp = 32767
off = 63
ret = 23
__FUNCTION__ = "db_do_query_internal"
#8 0x00007ffff5fd3fbd in db_do_query (_h=0x7ffff6cb4e78,
_k=0x7fffffffdf70, _op=0x0, _v=0x7fffffffdf40, _c=0x7fffffffdf60,
_n=1, _nc=1, _o=0x0, _r=0x7fffffffdf38,
val2str=0x7ffff6a39374 <db_mysql_val2str>,
submit_query=0x7ffff6a2825c <db_mysql_submit_query>,
store_result=0x7ffff6a2998c <db_mysql_store_result>)
at db_query.c:156
No locals.
#9 0x00007ffff6a2b04e in db_mysql_query (_h=0x7ffff6cb4e78,
_k=0x7fffffffdf70, _op=0x0, _v=0x7fffffffdf40, _c=0x7fffffffdf60,
_n=1, _nc=1, _o=0x0, _r=0x7fffffffdf38)
at km_dbase.c:323
No locals.
#10 0x00007ffff5fcd6e9 in db_table_version (dbf=0x7ffff2d1e3c0,
connection=0x7ffff6cb4e78, table=0x7ffff2d1d990) at db.c:400
key = {0x7fffffffdf20}
col = {0x7fffffffdf10}
val = {{type = DB1_STR, nul = 0, free = -167987365, val =
{int_val = -223246127, ll_val = 140737265109201, double_val =
6.9533447780108125e-310,
time_val = 140737265109201, string_val = 0x7ffff2b188d1
"dispatcher", str_val = {s = 0x7ffff2b188d1 "dispatcher", len = 10},
blob_val = {s = 0x7ffff2b188d1 "dispatcher", len = 10}, bitmap_val =
4071721169}}}
res = 0x0
ver = 0x0
version = 0x9c7be0
tmp1 = {s = 0x7ffff5feb0e1 "table_name", len = 10}
tmp2 = {s = 0x7ffff5feb0ec "table_version", len = 13}
ret = 8
__FUNCTION__ = "db_table_version"
#11 0x00007ffff2aeab2f in init_ds_db () at dispatch.c:666
ret = 297
__FUNCTION__ = "init_ds_db"
#12 0x00007ffff2b04434 in mod_init () at dispatcher.c:317
avp_spec = {type = 48, getf = 0x7fffffffe190, setf =
0x7fffffffe0d0, pvp = {pvn = {type = 16, nfree = 0x3000000028, u =
{isname = {type = -7760, name = {
n = 7292328, s = {s = 0x6f45a8 "DEBUG", len =
7555677}, re = 0x6f45a8}}, dname = 0x3ffffe1b0}}, pvi = {type =
7555621, u = {ival = -223246127,
dval = 0x7ffff2b188d1}}}, trans = 0x0}
__FUNCTION__ = "mod_init"
#13 0x000000000058f59d in init_mod (m=0x7ffff6cb3740) at sr_module.c:966
__FUNCTION__ = "init_mod"
#14 0x000000000058f8a2 in init_modules () at sr_module.c:995
t = 0x7ffff77ac500
i = 1187628493
__FUNCTION__ = "init_modules"
#15 0x00000000004aa2b0 in main (argc=5, argv=0x7fffffffe528) at
main.c:2501
cfg_stream = 0xa94020
c = -1
r = 32767
tmp = 0x7ffff7ffd000 ""
tmp_len = 32767
port = -7008
proto = 32767
options = 0x7023e0
":f:cm:M:dVIhEeb:l:L:n:vKrRDTN:W:w:t:u:g:P:G:SQ:O:a:A:"
ret = -1
seed = 39348307
rfd = 8
debug_save = 0
debug_flag = 0
dont_fork_cnt = 0
n_lst = 0xf0b2ff
p = 0xbf <Address 0xbf out of bounds>
__FUNCTION__ = "main"
What are the differences between the server that works and the one
that doesn't? I mean hardware and operating system details?
Quickly checking the source code and comparing it with the logs
doesn't reveal any problem -- it is about the allocation of the next
fragment (the one right after the previously allocated one), which
was not used at all before. The only reasons I can think of right now
are corrupted memory or a faulty OS.
Daniel
Post by 张顺: 4.2 has the same problem.
At the beginning I used 4.1 and thought this was a bug in 4.1; then I
used 4.2 to check whether the problem was solved. The problem is still there.
see ka_4_2_0.log
ka_4_2_0.log
<https://docs.google.com/file/d/0B5x1TDtoeVvAckdMbDRmT3E3UjA/edit?usp=drive_web>
ka.log is generated by kamailio 4.1.6.
2014-10-20 17:44 GMT+08:00 Daniel-Constantin Mierla
Are you using 4.1 or 4.2?
Because the subject mentioned 4.2 but the version of
kamailio is 4.1
Daniel
Post by 张顺: -M 12 cannot help. All config is the same.
See ka.log in the attachment; the kamailio version is 4.1.6.
version: kamailio 4.1.6 (x86_64/linux) 010d57
flags: STATS: Off, USE_TCP, USE_TLS, TLS_HOOKS,
USE_RAW_SOCKS,
Post by 张顺: DISABLE_NAGLE, USE_MCAST, DNS_IP_HACK, SHM_MEM,
SHM_MMAP, PKG_MALLOC,
Post by 张顺: DBG_QM_MALLOC, USE_FUTEX, FAST_LOCK-ADAPTIVE_WAIT,
USE_DNS_CACHE,
Post by 张顺: USE_DNS_FAILOVER, USE_NAPTR, USE_DST_BLACKLIST,
HAVE_RESOLV_RES
Post by 张顺: ADAPTIVE_WAIT_LOOPS=1024, MAX_RECV_BUFFER_SIZE 262144,
MAX_LISTEN 16,
Post by 张顺: MAX_URI_SIZE 1024, BUF_SIZE 65535, DEFAULT PKG_SIZE 4MB
poll method support: poll, epoll_lt, epoll_et,
sigio_rt, select.
Post by 张顺: id: 010d57
compiled on 16:48:03 Oct 20 2014 with gcc 4.1.2
--
Daniel-Constantin Mierla
http://twitter.com/#!/miconda
<http://twitter.com/#%21/miconda> -
http://www.linkedin.com/in/miconda
--
Daniel-Constantin Mierla
http://twitter.com/#!/miconda <http://twitter.com/#%21/miconda> - http://www.linkedin.com/in/miconda