source: trunk/oscam-reader.c @ 1269

Last change on this file since 1269 was 1217, checked in by cogsi, 13 years ago

speedfrog's cccam-reader updates (thanks!)

File size: 15.2 KB
#include "globals.h"

int ridx=0, logfd=0;

static int proxy;
static struct s_emm *emmcache;
static int last_idx=1;
static ushort idx=1;

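/* Reader init-history helpers: cs_ri_brk() saves or restores the current
   position in reader[ridx].init_history; cs_ri_log() logs a message and,
   with CS_RDR_INIT_HIST, also appends it to that history buffer. */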
void cs_ri_brk(int flag)
{
#ifdef CS_RDR_INIT_HIST
  static int brk_pos=0;
  if (flag)
    brk_pos=reader[ridx].init_history_pos;
  else
    reader[ridx].init_history_pos=brk_pos;
#endif
}

void cs_ri_log(char *fmt, ...)
{
  char txt[256];

  va_list params;
  va_start(params, fmt);
  vsnprintf(txt, sizeof(txt), fmt, params);
  va_end(params);
  cs_log("%s", txt);
#ifdef CS_RDR_INIT_HIST
  int val;
  val=sizeof(reader[ridx].init_history)-reader[ridx].init_history_pos-1;
  if (val>0)
    snprintf((char *) reader[ridx].init_history+reader[ridx].init_history_pos, val, "%s", txt);
  reader[ridx].init_history_pos+=strlen(txt)+1;
#endif
}

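/* Distribute a received DCW (or a "not found") to every pending ECM task
   whose ECM hash matches task 'idx' and answer it over the c2m pipe. */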
static void casc_check_dcw(int idx, int rc, uchar *cw)
{
  int i;
  for (i=1; i<CS_MAXPENDING; i++)
  {
    if ((ecmtask[i].rc>=10) &&
        (!memcmp(ecmtask[i].ecmd5, ecmtask[idx].ecmd5, CS_ECMSTORESIZE)))
    {
      if (rc)
      {
        ecmtask[i].rc=(i==idx) ? 1 : 2;
        if (ecmtask[i].gbxRidx) ecmtask[i].rc=0;
        memcpy(ecmtask[i].cw, cw, 16);
      }
      else
        ecmtask[i].rc=0;
      write_ecm_answer(fd_c2m, &ecmtask[i]);
      ecmtask[i].idx=0;
    }
  }
}

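/* Wait up to 'msec' milliseconds for data on the cascading socket (pfd) and
   read it via the protocol handler; returns bytes read, 0 on timeout, -1 if
   no socket is open or the receive fails. */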
static int casc_recv_timer(uchar *buf, int l, int msec)
{
  struct timeval tv;
  fd_set fds;
  int rc;

  if (!pfd) return(-1);
  tv.tv_sec = msec/1000;
  tv.tv_usec = (msec%1000)*1000;
  FD_ZERO(&fds);
  FD_SET(pfd, &fds);
  select(pfd+1, &fds, 0, 0, &tv);
  rc=0;
  if (FD_ISSET(pfd, &fds))
    if (!(rc=reader[ridx].ph.recv(buf, l)))
      rc=-1;

  return(rc);
}

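/* Non-blocking connect with timeout: switch the socket to O_NONBLOCK, start
   the connect, wait up to 'nsec' seconds with select() and check SO_ERROR
   before restoring the original file status flags. */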
static int connect_nonb(int sockfd, const struct sockaddr *saptr, socklen_t salen, int nsec)
{
  int flags, n, error;
  socklen_t len;
  fd_set rset, wset;
  struct timeval tval;

  flags = fcntl(sockfd, F_GETFL, 0);
  fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);

  error = 0;
  cs_debug("conn_nb 1 (fd=%d)", sockfd);

  if ( (n = connect(sockfd, saptr, salen)) < 0) {
    if( errno==EALREADY ) {
      cs_debug("conn_nb in progress, errno=%d", errno);
      return(-1);
    }
    else if( errno==EISCONN ) {
      cs_debug("conn_nb already connected, errno=%d", errno);
      goto done;
    }
    cs_debug("conn_nb 2 (fd=%d)", sockfd);
    if (errno != EINPROGRESS) {
      cs_debug("conn_nb 3 (fd=%d)", sockfd);
      return(-1);
    }
  }

  cs_debug("n = %d\n", n);

  /* Do whatever we want while the connect is taking place. */
  if (n == 0)
    goto done; /* connect completed immediately */

  FD_ZERO(&rset);
  FD_SET(sockfd, &rset);
  wset = rset;
  tval.tv_sec = nsec;
  tval.tv_usec = 0;

  if ( (n = select(sockfd+1, &rset, &wset, 0, nsec ? &tval : 0)) == 0) {
    //close(sockfd); // timeout
    cs_debug("conn_nb 4 (fd=%d)", sockfd);
    errno = ETIMEDOUT;
    return(-1);
  }

  cs_debug("conn_nb 5 (fd=%d)", sockfd);

  if (FD_ISSET(sockfd, &rset) || FD_ISSET(sockfd, &wset)) {
    cs_debug("conn_nb 6 (fd=%d)", sockfd);
    len = sizeof(error);
    if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &error, &len) < 0) {
      cs_debug("conn_nb 7 (fd=%d)", sockfd);
      return(-1); // Solaris pending error
    }
  } else {
    cs_debug("conn_nb 8 (fd=%d)", sockfd);
    return -2;
  }

done:
  cs_debug("conn_nb 9 (fd=%d)", sockfd);
  fcntl(sockfd, F_SETFL, flags); /* restore file status flags */

  if (error) {
    cs_debug("cccam: conn_nb 10 (fd=%d)", sockfd);
    //close(sockfd); /* just in case */
    errno = error;
    return(-1);
  }
  return(0);
}

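/* (Re)open the cascading TCP connection with a 5 second connect timeout and
   put the socket back into blocking mode; returns the fd or -1 on failure. */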
int network_tcp_connection_open()
{
  int flags;
  if( connect_nonb(client[cs_idx].udp_fd,
      (struct sockaddr *)&client[cs_idx].udp_sa,
      sizeof(client[cs_idx].udp_sa), 5) < 0)
  {
    cs_log("connect(fd=%d) failed: (errno=%d)", client[cs_idx].udp_fd, errno);
    return -1;
  }
  flags = fcntl(client[cs_idx].udp_fd, F_GETFL, 0);
  flags &= ~O_NONBLOCK;
  fcntl(client[cs_idx].udp_fd, F_SETFL, flags);

  return client[cs_idx].udp_fd;
}

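/* Close a cascading connection: drop the fd, reset all pending ECM tasks and
   reader state, then re-initialize the protocol and re-resolve the peer. */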
void network_tcp_connection_close(int fd)
{
  cs_debug("tcp_conn_close(): fd=%d, is_server=%d", fd, is_server);
  close(fd);
  client[cs_idx].udp_fd = 0;

  if (!is_server)
  {
    int i;
    pfd=0;
    reader[ridx].tcp_connected = 0;

    for (i=0; i<CS_MAXPENDING; i++)
    {
      ecmtask[i].idx=0;
      ecmtask[i].rc=0;
    }

    reader[ridx].ncd_msgid=0;
    reader[ridx].last_s=reader[ridx].last_g=0;

    if (reader[ridx].ph.c_init())
    {
      cs_debug("network_tcp_connection_close() exit(1);");
      cs_exit(1);
    }

    cs_resolve();
//  cs_log("last_s=%d, last_g=%d", reader[ridx].last_s, reader[ridx].last_g);
  }
}

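/* Read a caid/provid/srvid notification from the protocol's log socket and
   answer the matching pending ECM task as "not found". */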
static void casc_do_sock_log()
{
  int i, idx;
  ushort caid, srvid;
  ulong provid;

  idx=reader[ridx].ph.c_recv_log(&caid, &provid, &srvid);
  client[cs_idx].last=time((time_t)0);
  if (idx<0) return; // no dcw-msg received

  for (i=1; i<CS_MAXPENDING; i++)
  {
    if ( (ecmtask[i].rc>=10)
      && (ecmtask[i].idx==idx)
      && (ecmtask[i].caid==caid)
      && (ecmtask[i].prid==provid)
      && (ecmtask[i].srvid==srvid))
    {
      casc_check_dcw(i, 0, ecmtask[i].cw); // send "not found"
      break;
    }
  }
}

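/* Receive one answer from the cascading connection (waiting at most 'w' ms),
   close a dead TCP connection if nothing arrives, and pass the DCW on to the
   matching pending ECM task. */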
static void casc_do_sock(int w)
{
  int i, n, idx, rc, j;
  uchar buf[1024];
  uchar dcw[16];

  if ((n=casc_recv_timer(buf, sizeof(buf), w))<=0)
  {
    if (reader[ridx].ph.type==MOD_CONN_TCP && reader[ridx].typ != R_RADEGAST)
    {
      cs_debug("casc_do_sock: close connection");
      network_tcp_connection_close(client[cs_idx].udp_fd);
    }
    return;
  }
  client[cs_idx].last=time((time_t)0);
  idx=reader[ridx].ph.c_recv_chk(dcw, &rc, buf, n);

  if (idx<0) return; // no dcw received
  reader[ridx].last_g=time((time_t*)0); // for reconnect timeout
//cs_log("casc_do_sock: last_s=%d, last_g=%d", reader[ridx].last_s, reader[ridx].last_g);
  if (!idx) idx=last_idx;
  j=0;
  for (i=1; i<CS_MAXPENDING; i++)
  {
    if (ecmtask[i].idx==idx)
    {
      casc_check_dcw(i, rc, dcw);
      j=1;
      break;
    }
  }
}

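/* Wait for the answer to ECM task 'n' until cfg->srtimeout expires, then
   simulate "not found" if no DCW arrived. */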
static void casc_get_dcw(int n)
{
  int w;
  struct timeb tps, tpe;
  tpe=ecmtask[n].tps;
  //tpe.millitm+=1500; // TODO: timeout of 1500 should be config
  tpe.millitm+=cfg->srtimeout;
  tpe.time+=(tpe.millitm/1000);
  tpe.millitm%=1000;

  cs_ftime(&tps);
  while (((w=1000*(tpe.time-tps.time)+tpe.millitm-tps.millitm)>0)
      && (ecmtask[n].rc>=10))
  {
    casc_do_sock(w);
    cs_ftime(&tps);
  }
  if (ecmtask[n].rc>=10)
    casc_check_dcw(n, 0, ecmtask[n].cw); // simulate "not found"
}

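/* Queue an ECM request for a cascading reader: drop timed-out entries, find a
   free slot in ecmtask[], assign a protocol idx, optionally force a reconnect
   (tcp_rto), send the ECM and, for non-multi protocols, wait for the DCW. */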
int casc_process_ecm(ECM_REQUEST *er)
{
  int rc, n, i, sflag;
  time_t t;//, tls;

  uchar buf[512];

  t=time((time_t *)0);
  for (n=0, i=sflag=1; i<CS_MAXPENDING; i++)
  {
    if ((t-(ulong)ecmtask[i].tps.time > ((cfg->ctimeout + 500) / 1000) + 1) &&
        (ecmtask[i].rc>=10)) // drop timeouts
    {
      ecmtask[i].rc=0;
    }
    if ((!n) && (ecmtask[i].rc<10)) // free slot found
      n=i;
    if ((ecmtask[i].rc>=10) && // ecm already pending
        (!memcmp(er->ecmd5, ecmtask[i].ecmd5, CS_ECMSTORESIZE)) &&
        (er->level<=ecmtask[i].level)) // ... this level at least
      sflag=0;
  }
  if (!n)
  {
    cs_log("WARNING: ecm pending table overflow !!");
    return(-2);
  }
  memcpy(&ecmtask[n], er, sizeof(ECM_REQUEST));
  if( reader[ridx].typ == R_NEWCAMD )
    ecmtask[n].idx=(reader[ridx].ncd_msgid==0)?2:reader[ridx].ncd_msgid+1;
  else
    ecmtask[n].idx=idx++;
  ecmtask[n].rc=10;
  cs_debug("---- ecm_task %d, idx %d, sflag=%d, level=%d",
           n, ecmtask[n].idx, sflag, er->level);

  if( reader[ridx].ph.type==MOD_CONN_TCP && reader[ridx].tcp_rto )
  {
    int rto = abs(reader[ridx].last_s - reader[ridx].last_g);
    if (rto >= reader[ridx].tcp_rto)
    {
      cs_debug("rto=%d", rto);
      network_tcp_connection_close(client[cs_idx].udp_fd);
    }
  }

  if (cfg->show_ecm_dw || client[cs_idx].dbglvl)
    cs_dump(er->ecm, er->l, "casc ecm:");
  rc=0;
  if (sflag)
  {
    if (!client[cs_idx].udp_sa.sin_addr.s_addr) // once resolved at least
      cs_resolve();

    if ((rc=reader[ridx].ph.c_send_ecm(&ecmtask[n], buf)))
      casc_check_dcw(n, 0, ecmtask[n].cw); // simulate "not found"
    else
      last_idx = ecmtask[n].idx;
    reader[ridx].last_s = t; // used for inactive_timeout and reconnect_timeout in TCP reader

    if (!reader[ridx].ph.c_multi)
      casc_get_dcw(n);
  }

//cs_log("casc_process_ecm 1: last_s=%d, last_g=%d", reader[ridx].last_s, reader[ridx].last_g);

  if (idx>0x1ffe) idx=1;
  return(rc);
}

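/* Store an EMM in the round-robin emmcache and return its slot index. */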
static int reader_store_emm(uchar *emm, uchar type)
{
  static int rotate=0;
  int rc;
  memcpy(emmcache[rotate].emm, emm, emm[2]);
  emmcache[rotate].type=type;
  emmcache[rotate].count=1;
//  cs_debug("EMM stored (index %d)", rotate);
  rc=rotate;
  rotate=(rotate+1) % CS_EMMCACHESIZE;
  return(rc);
}

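/* Handle an ECM request from the pipe: filter by CAID, check the ECM cache,
   then either forward it to the cascading protocol (proxy) or decode it on
   the local card and write the answer back. */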
static void reader_get_ecm(ECM_REQUEST *er)
{
  //cs_log("hallo idx:%d rc:%d caid:%04X",er->idx,er->rc,er->caid);
  if (er->rc<10)
  {
    send_dcw(er);
    return;
  }
  er->ocaid=er->caid;
  if (!chk_bcaid(er, &reader[ridx].ctab))
  {
    cs_debug("caid %04X filtered", er->caid);
    er->rcEx=E2_CAID;
    er->rc=0;
    write_ecm_answer(fd_c2m, er);
    return;
  }
  if (check_ecmcache(er, client[er->cidx].grp))
  {
    er->rc=2;
    write_ecm_answer(fd_c2m, er);
    return;
  }
  if (proxy)
  {
    client[cs_idx].last_srvid=er->srvid;
    client[cs_idx].last_caid=er->caid;
    casc_process_ecm(er);
    return;
  }
  if (cfg->show_ecm_dw || client[cs_idx].dbglvl)
    cs_dump(er->ecm, er->l, "ecm:");
  er->rc=reader_ecm(er);
  write_ecm_answer(fd_c2m, er);
  reader_post_process();
  //if(reader[ridx].typ=='r') reader[ridx].qlen--;
}

static void reader_send_DCW(ECM_REQUEST *er)
{
  if (er->rc<10)
  {
    send_dcw(er);
  }
}

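/* Handle an EMM: skip it if the serial does not match this reader, consult
   the EMM cache (cachemm/rewritemm), write it to the card when needed (local
   readers only) and log the result: 0=error, 1=written, 2=skipped, 3=blocked. */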
static int reader_do_emm(EMM_PACKET *ep)
{
  int i, no, rc, ecs;
  char *rtxt[] = { "error", "written", "skipped", "blocked" };
  struct timeb tps, tpe;

  cs_ftime(&tps);

  if (memcmp(ep->hexserial, reader[ridx].hexserial, 8))
    return(3);

  no=0;
  for (i=ecs=0; (i<CS_EMMCACHESIZE) && (!ecs); i++)
    if (!memcmp(emmcache[i].emm, ep->emm, ep->emm[2]))
    {
      if (reader[ridx].cachemm)
        ecs=(reader[ridx].rewritemm > emmcache[i].count) ? 1 : 2;
      else
        ecs=1;
      no=++emmcache[i].count;
      i--;
    }

  if ((rc=ecs)<2)
  {
    rc=(proxy) ? 0 : reader_emm(ep);
    if (!ecs)
    {
      i=reader_store_emm(ep->emm, ep->type);
      no=1;
    }
  }
  if (rc) client[cs_idx].lastemm=time((time_t)0);

  if (reader[ridx].logemm>=rc)
  {
    cs_ftime(&tpe);
//  cs_log("%s type=%02x, len=%d, idx=%d, cnt=%d: %s (%d ms)",
//    cs_inet_ntoa(client[ep->cidx].ip), emmcache[i].type, ep->emm[2],
//    i, no, rtxt[rc], 1000*(tpe.time-tps.time)+tpe.millitm-tps.millitm);
    cs_log("%s type=%02x, len=%d, idx=%d, cnt=%d: %s (%d ms)",
           username(ep->cidx), emmcache[i].type, ep->emm[2],
           i, no, rtxt[rc], 1000*(tpe.time-tps.time)+tpe.millitm-tps.millitm);
  }
  return(rc);
}

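/* Wait for work on the master pipe (fd1), the cascading socket (fd2) and the
   log socket; enforce the TCP inactivity timeout and return 1 (pipe),
   2 (socket), 3 (log socket) or 0 (timeout / nothing to do). */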
static int reader_listen(int fd1, int fd2)
{
  int fdmax, tcp_toflag, use_tv=(!proxy);
  int is_tcp=(reader[ridx].ph.type==MOD_CONN_TCP);
  fd_set fds;
  struct timeval tv;

  if(reader[ridx].typ==R_GBOX){
    struct timeb tpe;
    int x;
    ulong ms;
    cs_ftime(&tpe);
    for(x=0;x<CS_MAXPENDING;x++){
      ms=1000*(tpe.time-ecmtask[x].tps.time)+tpe.millitm-ecmtask[x].tps.millitm;
      if(ecmtask[x].rc == 10 && ms > cfg->ctimeout && ridx == ecmtask[x].gbxRidx){
        //cs_log("hello rc=%d idx:%d x:%d ridx%d ridx:%d",ecmtask[x].rc,ecmtask[x].idx,x,ridx,ecmtask[x].gbxRidx);
        ecmtask[x].rc=5;
        send_dcw(&ecmtask[x]);
      }
    }
  }

  if (master_pid!=getppid()) cs_exit(0);
  tcp_toflag=(fd2 && is_tcp && reader[ridx].tcp_ito && reader[ridx].tcp_connected);
  tv.tv_sec = 0;
  tv.tv_usec = 100000L;
  if (tcp_toflag)
  {
    tv.tv_sec = reader[ridx].tcp_ito*60;
    tv.tv_usec = 0;
    use_tv = 1;
  }
  FD_ZERO(&fds);
  FD_SET(fd1, &fds);
  if (fd2) FD_SET(fd2, &fds);
  if (logfd) FD_SET(logfd, &fds);
  fdmax=(fd1>fd2) ? fd1 : fd2;
  fdmax=(fdmax>logfd) ? fdmax : logfd;
  if (select(fdmax+1, &fds, 0, 0, (use_tv) ? &tv : 0)<0) return(0);
  if (master_pid!=getppid()) cs_exit(0);

  if ((logfd) && (FD_ISSET(logfd, &fds)))
  {
    cs_debug("select: log-socket is set");
    return(3);
  }

  if ((fd2) && (FD_ISSET(fd2, &fds)))
  {
    cs_debug("select: socket is set");
    return(2);
  }

  if (FD_ISSET(fd1, &fds))
  {
    if (tcp_toflag)
    {
      time_t now;
      int time_diff;
      time(&now);
      time_diff = abs(now-reader[ridx].last_s);
      if (time_diff>(reader[ridx].tcp_ito*60))
      {
        cs_debug("%s inactive_timeout (%d), close connection (fd=%d)",
                 reader[ridx].ph.desc, time_diff, fd2);
        network_tcp_connection_close(fd2);
      }
    }
    cs_debug("select: pipe is set");
    return(1);
  }

  if (tcp_toflag)
  {
    cs_debug("%s inactive_timeout (%d), close connection (fd=%d)",
             reader[ridx].ph.desc, tv.tv_sec, fd2);
    network_tcp_connection_close(fd2);
    return(0);
  }

  if (!proxy) reader_checkhealth();
  return(0);
}

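/* Dispatch one message from the master pipe to the matching handler. */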
static void reader_do_pipe()
{
  uchar *ptr;
  switch(read_from_pipe(fd_m2c, &ptr, 0))
  {
    case PIP_ID_ECM:
      reader_get_ecm((ECM_REQUEST *)ptr);
      break;
    case PIP_ID_DCW:
      reader_send_DCW((ECM_REQUEST *)ptr);
      break;
    case PIP_ID_EMM:
      reader_do_emm((EMM_PACKET *)ptr);
      break;
    case PIP_ID_CIN:
      reader_card_info();
      break;
  }
}

static void reader_main()
{
  while (1)
  {
    switch(reader_listen(fd_m2c, pfd))
    {
      case 1: reader_do_pipe();    break;
      case 2: casc_do_sock(0);     break;
      case 3: casc_do_sock_log();  break;
    }
  }
}

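/* Reader process entry point: set up either a cascading (proxy) protocol or a
   local card device, allocate the EMM cache and ECM task table, then enter
   the main loop. */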
void start_cardreader()
{
  cs_ptyp=D_READER;

  if ((proxy=reader[ridx].typ & R_IS_CASCADING))
  {
    client[cs_idx].typ='p';
    client[cs_idx].port=reader[ridx].r_port;
    strcpy(client[cs_idx].usr, reader[ridx].r_usr);
    switch(reader[ridx].typ)
    {
      case R_CAMD33  : module_camd33(&reader[ridx].ph); break;
      case R_CAMD35  : module_camd35(&reader[ridx].ph); break;
      case R_NEWCAMD : module_newcamd(&reader[ridx].ph); break;
      case R_RADEGAST: module_radegast(&reader[ridx].ph); break;
      case R_SERIAL  : module_oscam_ser(&reader[ridx].ph); break;
      case R_CS378X  : module_camd35_tcp(&reader[ridx].ph); break;
      case R_CCCAM   : module_cccam(&reader[ridx].ph); break;
#ifdef CS_WITH_GBOX
      case R_GBOX    : module_gbox(&reader[ridx].ph); strcpy(client[cs_idx].usr, reader[ridx].label); break;
#endif
    }
    if (!(reader[ridx].ph.c_init))
    {
      cs_log("FATAL: %s-protocol not supporting cascading", reader[ridx].ph.desc);
      sleep(1);
      cs_exit(1);
    }
    if (reader[ridx].ph.c_init())
      cs_exit(1);
    if ((reader[ridx].log_port) && (reader[ridx].ph.c_init_log))
      reader[ridx].ph.c_init_log();
  }
  else
  {
    client[cs_idx].ip=cs_inet_addr("127.0.0.1");
    while (reader_device_init(reader[ridx].device)==2)
      sleep(60); // wait 60 secs and try again
  }

  emmcache=(struct s_emm *)malloc(CS_EMMCACHESIZE*(sizeof(struct s_emm)));
  if (!emmcache)
  {
    cs_log("Cannot allocate memory (errno=%d)", errno);
    cs_exit(1);
  }
  memset(emmcache, 0, CS_EMMCACHESIZE*(sizeof(struct s_emm)));

  ecmtask=(ECM_REQUEST *)malloc(CS_MAXPENDING*(sizeof(ECM_REQUEST)));
  if (!ecmtask)
  {
    cs_log("Cannot allocate memory (errno=%d)", errno);
    cs_exit(1);
  }
  memset(ecmtask, 0, CS_MAXPENDING*(sizeof(ECM_REQUEST)));

  reader_main();
  cs_exit(0);
}