/*
    en50221 encoder An implementation for libdvb
    an implementation for the en50221 transport layer

    Copyright (C) 2004, 2005 Manu Abraham <abraham.manu@gmail.com>
    Copyright (C) 2005 Julian Scheel (julian at jusst dot de)
    Copyright (C) 2006 Andrew de Quincey (adq_dvb@lidskialf.net)

    This library is free software; you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation; either version 2.1 of
    the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <libdvbmisc/dvbmisc.h>
#include <libdvbapi/dvbca.h>
#include "en50221_errno.h"
#include "en50221_transport.h"
#include "asn_1.h"
  32. // these are the Transport Tags, like
  33. // described in EN50221, Annex A.4.1.13 (pg70)
  34. #define T_SB 0x80 // sb primitive h<--m
  35. #define T_RCV 0x81 // receive primitive h-->m
  36. #define T_CREATE_T_C 0x82 // create transport connection primitive h-->m
  37. #define T_C_T_C_REPLY 0x83 // ctc reply primitive h<--m
  38. #define T_DELETE_T_C 0x84 // delete tc primitive h<->m
  39. #define T_D_T_C_REPLY 0x85 // dtc reply primitive h<->m
  40. #define T_REQUEST_T_C 0x86 // request transport connection primitive h<--m
  41. #define T_NEW_T_C 0x87 // new tc / reply to t_request primitive h-->m
  42. #define T_T_C_ERROR 0x77 // error creating tc primitive h-->m
  43. #define T_DATA_LAST 0xA0 // convey data from higher constructed h<->m
  44. // layers
  45. #define T_DATA_MORE 0xA1 // convey data from higher constructed h<->m
  46. // layers
  47. struct en50221_message {
  48. struct en50221_message *next;
  49. uint32_t length;
  50. uint8_t data[0];
  51. };
// State for one transport connection on a slot.
struct en50221_connection {
	uint32_t state;	// the current state: idle/in_delete/in_create/active
	struct timeval tx_time;	// time last request was sent from host->module, or 0 if ok
	struct timeval last_poll_time;	// time of last poll transmission
	uint8_t *chain_buffer;	// used to save parts of chained packets
	uint32_t buffer_length;	// number of bytes currently held in chain_buffer
	struct en50221_message *send_queue;	// head of the pending-transmit list
	struct en50221_message *send_queue_tail;	// tail pointer for O(1) append
};
// State for one registered CAM slot on a CA device.
struct en50221_slot {
	int ca_hndl;	// CA device handle, or -1 if this slot entry is unused
	uint8_t slot;	// CAM slot number on the CA device
	struct en50221_connection *connections;	// array[max_connections_per_slot]
	pthread_mutex_t slot_lock;	// guards this slot's fields and connections
	uint32_t response_timeout;	// max wait for a module response before timing out
					// (units defined by time_after() in dvbmisc -- TODO confirm)
	uint32_t poll_delay;	// minimum delay between keep-alive polls of a connection
				// (same units as response_timeout)
};
// Top-level transport-layer instance.
struct en50221_transport_layer {
	uint8_t max_slots;	// capacity of slots[]
	uint8_t max_connections_per_slot;	// capacity of each slot's connections[]
	struct en50221_slot *slots;
	struct pollfd *slot_pollfds;	// poll() set, rebuilt when slots_changed is set
	int slots_changed;	// flag: slot set changed; pollfds need rebuilding
	pthread_mutex_t global_lock;	// guards slot registration/removal
	pthread_mutex_t setcallback_lock;	// guards callback/callback_arg
	int error;	// last error code (EN50221ERR_*)
	int error_slot;	// slot the last error occurred on, or -1 if not slot-specific
	en50221_tl_callback callback;	// user callback for received data/events
	void *callback_arg;	// opaque argument passed back to callback
};
// ---- internal helpers (defined later in this file) ----

// Parse and dispatch a block of received TPDU data for a slot.
static int en50221_tl_process_data(struct en50221_transport_layer *tl,
				   uint8_t slot_id, uint8_t * data,
				   uint32_t data_length);
// Send an empty T_DATA_LAST to ask the module for new data.
static int en50221_tl_poll_tc(struct en50221_transport_layer *tl,
			      uint8_t slot_id, uint8_t connection_id);
// Claim a free connection id on a slot (returns -1 when none free --
// see usage in en50221_tl_new_tc).
static int en50221_tl_alloc_new_tc(struct en50221_transport_layer *tl,
				   uint8_t slot_id);
// Append a message to a connection's send queue.
static void queue_message(struct en50221_transport_layer *tl,
			  uint8_t slot_id, uint8_t connection_id,
			  struct en50221_message *msg);
// Per-TPDU-tag handlers for data received from the module.
static int en50221_tl_handle_create_tc_reply(struct en50221_transport_layer
					     *tl, uint8_t slot_id,
					     uint8_t connection_id);
static int en50221_tl_handle_delete_tc(struct en50221_transport_layer *tl,
				       uint8_t slot_id,
				       uint8_t connection_id);
static int en50221_tl_handle_delete_tc_reply(struct en50221_transport_layer
					     *tl, uint8_t slot_id,
					     uint8_t connection_id);
static int en50221_tl_handle_request_tc(struct en50221_transport_layer *tl,
					uint8_t slot_id,
					uint8_t connection_id);
static int en50221_tl_handle_data_more(struct en50221_transport_layer *tl,
				       uint8_t slot_id,
				       uint8_t connection_id,
				       uint8_t * data,
				       uint32_t data_length);
static int en50221_tl_handle_data_last(struct en50221_transport_layer *tl,
				       uint8_t slot_id,
				       uint8_t connection_id,
				       uint8_t * data,
				       uint32_t data_length);
static int en50221_tl_handle_sb(struct en50221_transport_layer *tl,
				uint8_t slot_id, uint8_t connection_id,
				uint8_t * data, uint32_t data_length);
  117. struct en50221_transport_layer *en50221_tl_create(uint8_t max_slots,
  118. uint8_t
  119. max_connections_per_slot)
  120. {
  121. struct en50221_transport_layer *tl = NULL;
  122. int i;
  123. int j;
  124. // setup structure
  125. tl = (struct en50221_transport_layer *)
  126. malloc(sizeof(struct en50221_transport_layer));
  127. if (tl == NULL)
  128. goto error_exit;
  129. tl->max_slots = max_slots;
  130. tl->max_connections_per_slot = max_connections_per_slot;
  131. tl->slots = NULL;
  132. tl->slot_pollfds = NULL;
  133. tl->slots_changed = 1;
  134. tl->callback = NULL;
  135. tl->callback_arg = NULL;
  136. tl->error_slot = 0;
  137. tl->error = 0;
  138. pthread_mutex_init(&tl->global_lock, NULL);
  139. pthread_mutex_init(&tl->setcallback_lock, NULL);
  140. // create the slots
  141. tl->slots = malloc(sizeof(struct en50221_slot) * max_slots);
  142. if (tl->slots == NULL)
  143. goto error_exit;
  144. // set them up
  145. for (i = 0; i < max_slots; i++) {
  146. tl->slots[i].ca_hndl = -1;
  147. // create the connections for this slot
  148. tl->slots[i].connections =
  149. malloc(sizeof(struct en50221_connection) * max_connections_per_slot);
  150. if (tl->slots[i].connections == NULL)
  151. goto error_exit;
  152. // create a mutex for the slot
  153. pthread_mutex_init(&tl->slots[i].slot_lock, NULL);
  154. // set them up
  155. for (j = 0; j < max_connections_per_slot; j++) {
  156. tl->slots[i].connections[j].state = T_STATE_IDLE;
  157. tl->slots[i].connections[j].tx_time.tv_sec = 0;
  158. tl->slots[i].connections[j].last_poll_time.tv_sec = 0;
  159. tl->slots[i].connections[j].last_poll_time.tv_usec = 0;
  160. tl->slots[i].connections[j].chain_buffer = NULL;
  161. tl->slots[i].connections[j].buffer_length = 0;
  162. tl->slots[i].connections[j].send_queue = NULL;
  163. tl->slots[i].connections[j].send_queue_tail = NULL;
  164. }
  165. }
  166. // create the pollfds
  167. tl->slot_pollfds = malloc(sizeof(struct pollfd) * max_slots);
  168. if (tl->slot_pollfds == NULL) {
  169. goto error_exit;
  170. }
  171. memset(tl->slot_pollfds, 0, sizeof(struct pollfd) * max_slots);
  172. return tl;
  173. error_exit:
  174. en50221_tl_destroy(tl);
  175. return NULL;
  176. }
  177. // Destroy an instance of the transport layer
  178. void en50221_tl_destroy(struct en50221_transport_layer *tl)
  179. {
  180. int i, j;
  181. if (tl) {
  182. if (tl->slots) {
  183. for (i = 0; i < tl->max_slots; i++) {
  184. if (tl->slots[i].connections) {
  185. for (j = 0; j < tl->max_connections_per_slot; j++) {
  186. if (tl->slots[i].connections[j].chain_buffer) {
  187. free(tl->slots[i].connections[j].chain_buffer);
  188. }
  189. struct en50221_message *cur_msg =
  190. tl->slots[i].connections[j].send_queue;
  191. while (cur_msg) {
  192. struct en50221_message *next_msg = cur_msg->next;
  193. free(cur_msg);
  194. cur_msg = next_msg;
  195. }
  196. tl->slots[i].connections[j].send_queue = NULL;
  197. tl->slots[i].connections[j].send_queue_tail = NULL;
  198. }
  199. free(tl->slots[i].connections);
  200. pthread_mutex_destroy(&tl->slots[i].slot_lock);
  201. }
  202. }
  203. free(tl->slots);
  204. }
  205. if (tl->slot_pollfds) {
  206. free(tl->slot_pollfds);
  207. }
  208. pthread_mutex_destroy(&tl->setcallback_lock);
  209. pthread_mutex_destroy(&tl->global_lock);
  210. free(tl);
  211. }
  212. }
  213. // this can be called from the user-space app to
  214. // register new slots that we should work with
  215. int en50221_tl_register_slot(struct en50221_transport_layer *tl,
  216. int ca_hndl, uint8_t slot,
  217. uint32_t response_timeout,
  218. uint32_t poll_delay)
  219. {
  220. // lock
  221. pthread_mutex_lock(&tl->global_lock);
  222. // we browse through the array of slots
  223. // to look for the first unused one
  224. int i;
  225. int16_t slot_id = -1;
  226. for (i = 0; i < tl->max_slots; i++) {
  227. if (tl->slots[i].ca_hndl == -1) {
  228. slot_id = i;
  229. break;
  230. }
  231. }
  232. if (slot_id == -1) {
  233. tl->error = EN50221ERR_OUTOFSLOTS;
  234. pthread_mutex_unlock(&tl->global_lock);
  235. return -1;
  236. }
  237. // set up the slot struct
  238. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  239. tl->slots[slot_id].ca_hndl = ca_hndl;
  240. tl->slots[slot_id].slot = slot;
  241. tl->slots[slot_id].response_timeout = response_timeout;
  242. tl->slots[slot_id].poll_delay = poll_delay;
  243. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  244. tl->slots_changed = 1;
  245. pthread_mutex_unlock(&tl->global_lock);
  246. return slot_id;
  247. }
  248. void en50221_tl_destroy_slot(struct en50221_transport_layer *tl,
  249. uint8_t slot_id)
  250. {
  251. int i;
  252. if (slot_id >= tl->max_slots)
  253. return;
  254. // lock
  255. pthread_mutex_lock(&tl->global_lock);
  256. // clear the slot
  257. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  258. tl->slots[slot_id].ca_hndl = -1;
  259. for (i = 0; i < tl->max_connections_per_slot; i++) {
  260. tl->slots[slot_id].connections[i].state = T_STATE_IDLE;
  261. tl->slots[slot_id].connections[i].tx_time.tv_sec = 0;
  262. tl->slots[slot_id].connections[i].last_poll_time.tv_sec = 0;
  263. tl->slots[slot_id].connections[i].last_poll_time.tv_usec = 0;
  264. if (tl->slots[slot_id].connections[i].chain_buffer) {
  265. free(tl->slots[slot_id].connections[i].
  266. chain_buffer);
  267. }
  268. tl->slots[slot_id].connections[i].chain_buffer = NULL;
  269. tl->slots[slot_id].connections[i].buffer_length = 0;
  270. struct en50221_message *cur_msg =
  271. tl->slots[slot_id].connections[i].send_queue;
  272. while (cur_msg) {
  273. struct en50221_message *next_msg = cur_msg->next;
  274. free(cur_msg);
  275. cur_msg = next_msg;
  276. }
  277. tl->slots[slot_id].connections[i].send_queue = NULL;
  278. tl->slots[slot_id].connections[i].send_queue_tail = NULL;
  279. }
  280. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  281. // tell upper layers
  282. pthread_mutex_lock(&tl->setcallback_lock);
  283. en50221_tl_callback cb = tl->callback;
  284. void *cb_arg = tl->callback_arg;
  285. pthread_mutex_unlock(&tl->setcallback_lock);
  286. if (cb)
  287. cb(cb_arg, T_CALLBACK_REASON_SLOTCLOSE, NULL, 0, slot_id, 0);
  288. tl->slots_changed = 1;
  289. pthread_mutex_unlock(&tl->global_lock);
  290. }
  291. int en50221_tl_poll(struct en50221_transport_layer *tl)
  292. {
  293. uint8_t data[4096];
  294. int slot_id;
  295. int j;
  296. // make up pollfds if the slots have changed
  297. pthread_mutex_lock(&tl->global_lock);
  298. if (tl->slots_changed) {
  299. for (slot_id = 0; slot_id < tl->max_slots; slot_id++) {
  300. if (tl->slots[slot_id].ca_hndl != -1) {
  301. tl->slot_pollfds[slot_id].fd = tl->slots[slot_id].ca_hndl;
  302. tl->slot_pollfds[slot_id].events = POLLIN | POLLPRI | POLLERR;
  303. tl->slot_pollfds[slot_id].revents = 0;
  304. } else {
  305. tl->slot_pollfds[slot_id].fd = 0;
  306. tl->slot_pollfds[slot_id].events = 0;
  307. tl->slot_pollfds[slot_id].revents = 0;
  308. }
  309. }
  310. tl->slots_changed = 0;
  311. }
  312. pthread_mutex_unlock(&tl->global_lock);
  313. // anything happened?
  314. if (poll(tl->slot_pollfds, tl->max_slots, 10) < 0) {
  315. tl->error_slot = -1;
  316. tl->error = EN50221ERR_CAREAD;
  317. return -1;
  318. }
  319. // go through all slots (even though poll may not have reported any events
  320. for (slot_id = 0; slot_id < tl->max_slots; slot_id++) {
  321. // check if this slot is still used and get its handle
  322. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  323. if (tl->slots[slot_id].ca_hndl == -1) {
  324. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  325. continue;
  326. }
  327. int ca_hndl = tl->slots[slot_id].ca_hndl;
  328. if (tl->slot_pollfds[slot_id].revents & (POLLPRI | POLLIN)) {
  329. // read data
  330. uint8_t r_slot_id;
  331. uint8_t connection_id;
  332. int readcnt = dvbca_link_read(ca_hndl, &r_slot_id,
  333. &connection_id,
  334. data, sizeof(data));
  335. if (readcnt < 0) {
  336. tl->error_slot = slot_id;
  337. tl->error = EN50221ERR_CAREAD;
  338. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  339. return -1;
  340. }
  341. // process it if we got some
  342. if (readcnt > 0) {
  343. if (tl->slots[slot_id].slot != r_slot_id) {
  344. // this message is for an other CAM of the same CA
  345. int new_slot_id;
  346. for (new_slot_id = 0; new_slot_id < tl->max_slots; new_slot_id++) {
  347. if ((tl->slots[new_slot_id].ca_hndl == ca_hndl) &&
  348. (tl->slots[new_slot_id].slot == r_slot_id))
  349. break;
  350. }
  351. if (new_slot_id != tl->max_slots) {
  352. // we found the requested CAM
  353. pthread_mutex_lock(&tl->slots[new_slot_id].slot_lock);
  354. if (en50221_tl_process_data(tl, new_slot_id, data, readcnt)) {
  355. pthread_mutex_unlock(&tl->slots[new_slot_id].slot_lock);
  356. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  357. return -1;
  358. }
  359. pthread_mutex_unlock(&tl->slots[new_slot_id].slot_lock);
  360. } else {
  361. tl->error = EN50221ERR_BADSLOTID;
  362. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  363. return -1;
  364. }
  365. } else
  366. if (en50221_tl_process_data(tl, slot_id, data, readcnt)) {
  367. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  368. return -1;
  369. }
  370. }
  371. } else if (tl->slot_pollfds[slot_id].revents & POLLERR) {
  372. // an error was reported
  373. tl->error_slot = slot_id;
  374. tl->error = EN50221ERR_CAREAD;
  375. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  376. return -1;
  377. }
  378. // poll the connections on this slot + check for timeouts
  379. for (j = 0; j < tl->max_connections_per_slot; j++) {
  380. // ignore connection if idle
  381. if (tl->slots[slot_id].connections[j].state == T_STATE_IDLE) {
  382. continue;
  383. }
  384. // send queued data
  385. if (tl->slots[slot_id].connections[j].state &
  386. (T_STATE_IN_CREATION | T_STATE_ACTIVE | T_STATE_ACTIVE_DELETEQUEUED)) {
  387. // send data if there is some to go and we're not waiting for a response already
  388. if (tl->slots[slot_id].connections[j].send_queue &&
  389. (tl->slots[slot_id].connections[j].tx_time.tv_sec == 0)) {
  390. // get the message
  391. struct en50221_message *msg =
  392. tl->slots[slot_id].connections[j].send_queue;
  393. if (msg->next != NULL) {
  394. tl->slots[slot_id].connections[j].send_queue = msg->next;
  395. } else {
  396. tl->slots[slot_id].connections[j].send_queue = NULL;
  397. tl->slots[slot_id].connections[j].send_queue_tail = NULL;
  398. }
  399. // send the message
  400. if (dvbca_link_write(tl->slots[slot_id].ca_hndl,
  401. tl->slots[slot_id].slot,
  402. j,
  403. msg->data, msg->length) < 0) {
  404. free(msg);
  405. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  406. tl->error_slot = slot_id;
  407. tl->error = EN50221ERR_CAWRITE;
  408. print(LOG_LEVEL, ERROR, 1, "CAWrite failed");
  409. return -1;
  410. }
  411. gettimeofday(&tl->slots[slot_id].connections[j].tx_time, 0);
  412. // fixup connection state for T_DELETE_T_C
  413. if (msg->length && (msg->data[0] == T_DELETE_T_C)) {
  414. tl->slots[slot_id].connections[j].state = T_STATE_IN_DELETION;
  415. if (tl->slots[slot_id].connections[j].chain_buffer) {
  416. free(tl->slots[slot_id].connections[j].chain_buffer);
  417. }
  418. tl->slots[slot_id].connections[j].chain_buffer = NULL;
  419. tl->slots[slot_id].connections[j].buffer_length = 0;
  420. }
  421. free(msg);
  422. }
  423. }
  424. // poll it if we're not expecting a reponse and the poll time has elapsed
  425. if (tl->slots[slot_id].connections[j].state & T_STATE_ACTIVE) {
  426. if ((tl->slots[slot_id].connections[j].tx_time.tv_sec == 0) &&
  427. (time_after(tl->slots[slot_id].connections[j].last_poll_time,
  428. tl->slots[slot_id].poll_delay))) {
  429. gettimeofday(&tl->slots[slot_id].connections[j].last_poll_time, 0);
  430. if (en50221_tl_poll_tc(tl, slot_id, j)) {
  431. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  432. return -1;
  433. }
  434. }
  435. }
  436. // check for timeouts - in any state
  437. if (tl->slots[slot_id].connections[j].tx_time.tv_sec &&
  438. (time_after(tl->slots[slot_id].connections[j].tx_time,
  439. tl->slots[slot_id].response_timeout))) {
  440. if (tl->slots[slot_id].connections[j].state &
  441. (T_STATE_IN_CREATION |T_STATE_IN_DELETION)) {
  442. tl->slots[slot_id].connections[j].state = T_STATE_IDLE;
  443. } else if (tl->slots[slot_id].connections[j].state &
  444. (T_STATE_ACTIVE | T_STATE_ACTIVE_DELETEQUEUED)) {
  445. tl->error_slot = slot_id;
  446. tl->error = EN50221ERR_TIMEOUT;
  447. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  448. return -1;
  449. }
  450. }
  451. }
  452. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  453. }
  454. return 0;
  455. }
// Register the callback invoked on received data and slot events.
// The pointer pair is swapped under setcallback_lock, matching the
// snapshot taken by readers (see en50221_tl_destroy_slot).
void en50221_tl_register_callback(struct en50221_transport_layer *tl,
				  en50221_tl_callback callback, void *arg)
{
	pthread_mutex_lock(&tl->setcallback_lock);
	tl->callback = callback;
	tl->callback_arg = arg;
	pthread_mutex_unlock(&tl->setcallback_lock);
}
// Get the slot the last error occurred on (-1 when not slot-specific).
int en50221_tl_get_error_slot(struct en50221_transport_layer *tl)
{
	return tl->error_slot;
}
// Get the last error code (EN50221ERR_*).
int en50221_tl_get_error(struct en50221_transport_layer *tl)
{
	return tl->error;
}
  472. int en50221_tl_send_data(struct en50221_transport_layer *tl,
  473. uint8_t slot_id, uint8_t connection_id,
  474. uint8_t * data, uint32_t data_size)
  475. {
  476. #ifdef DEBUG_TXDATA
  477. printf("[[[[[[[[[[[[[[[[[[[[\n");
  478. uint32_t ii = 0;
  479. for (ii = 0; ii < data_size; ii++) {
  480. printf("%02x: %02x\n", ii, data[ii]);
  481. }
  482. printf("]]]]]]]]]]]]]]]]]]]]\n");
  483. #endif
  484. if (slot_id >= tl->max_slots) {
  485. tl->error = EN50221ERR_BADSLOTID;
  486. return -1;
  487. }
  488. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  489. if (tl->slots[slot_id].ca_hndl == -1) {
  490. tl->error = EN50221ERR_BADSLOTID;
  491. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  492. return -1;
  493. }
  494. if (connection_id >= tl->max_connections_per_slot) {
  495. tl->error_slot = slot_id;
  496. tl->error = EN50221ERR_BADCONNECTIONID;
  497. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  498. return -1;
  499. }
  500. if (tl->slots[slot_id].connections[connection_id].state != T_STATE_ACTIVE) {
  501. tl->error = EN50221ERR_BADCONNECTIONID;
  502. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  503. return -1;
  504. }
  505. // allocate msg structure
  506. struct en50221_message *msg =
  507. malloc(sizeof(struct en50221_message) + data_size + 10);
  508. if (msg == NULL) {
  509. tl->error_slot = slot_id;
  510. tl->error = EN50221ERR_OUTOFMEMORY;
  511. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  512. return -1;
  513. }
  514. // make up data to send
  515. int length_field_len;
  516. msg->data[0] = T_DATA_LAST;
  517. if ((length_field_len = asn_1_encode(data_size + 1, msg->data + 1, 3)) < 0) {
  518. free(msg);
  519. tl->error_slot = slot_id;
  520. tl->error = EN50221ERR_ASNENCODE;
  521. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  522. return -1;
  523. }
  524. msg->data[1 + length_field_len] = connection_id;
  525. memcpy(msg->data + 1 + length_field_len + 1, data, data_size);
  526. msg->length = 1 + length_field_len + 1 + data_size;
  527. // queue it for transmission
  528. queue_message(tl, slot_id, connection_id, msg);
  529. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  530. return 0;
  531. }
  532. int en50221_tl_send_datav(struct en50221_transport_layer *tl,
  533. uint8_t slot_id, uint8_t connection_id,
  534. struct iovec *vector, int iov_count)
  535. {
  536. #ifdef DEBUG_TXDATA
  537. printf("[[[[[[[[[[[[[[[[[[[[\n");
  538. uint32_t ii = 0;
  539. uint32_t iipos = 0;
  540. for (ii = 0; ii < (uint32_t) iov_count; ii++) {
  541. uint32_t jj;
  542. for (jj = 0; jj < vector[ii].iov_len; jj++) {
  543. printf("%02x: %02x\n", jj + iipos,
  544. *((uint8_t *) (vector[ii].iov_base) + jj));
  545. }
  546. iipos += vector[ii].iov_len;
  547. }
  548. printf("]]]]]]]]]]]]]]]]]]]]\n");
  549. #endif
  550. if (slot_id >= tl->max_slots) {
  551. tl->error = EN50221ERR_BADSLOTID;
  552. return -1;
  553. }
  554. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  555. if (tl->slots[slot_id].ca_hndl == -1) {
  556. tl->error = EN50221ERR_BADSLOTID;
  557. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  558. return -1;
  559. }
  560. if (connection_id >= tl->max_connections_per_slot) {
  561. tl->error_slot = slot_id;
  562. tl->error = EN50221ERR_BADCONNECTIONID;
  563. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  564. return -1;
  565. }
  566. if (tl->slots[slot_id].connections[connection_id].state != T_STATE_ACTIVE) {
  567. tl->error = EN50221ERR_BADCONNECTIONID;
  568. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  569. return -1;
  570. }
  571. // calculate the total length of the data to send
  572. uint32_t data_size = 0;
  573. int i;
  574. for (i = 0; i < iov_count; i++) {
  575. data_size += vector[i].iov_len;
  576. }
  577. // allocate msg structure
  578. struct en50221_message *msg =
  579. malloc(sizeof(struct en50221_message) + data_size + 10);
  580. if (msg == NULL) {
  581. tl->error_slot = slot_id;
  582. tl->error = EN50221ERR_OUTOFMEMORY;
  583. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  584. return -1;
  585. }
  586. // make up data to send
  587. int length_field_len;
  588. msg->data[0] = T_DATA_LAST;
  589. if ((length_field_len = asn_1_encode(data_size + 1, msg->data + 1, 3)) < 0) {
  590. free(msg);
  591. tl->error_slot = slot_id;
  592. tl->error = EN50221ERR_ASNENCODE;
  593. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  594. return -1;
  595. }
  596. msg->data[1 + length_field_len] = connection_id;
  597. msg->length = 1 + length_field_len + 1 + data_size;
  598. msg->next = NULL;
  599. // merge the iovecs
  600. uint32_t pos = 1 + length_field_len + 1;
  601. for (i = 0; i < iov_count; i++) {
  602. memcpy(msg->data + pos, vector[i].iov_base,
  603. vector[i].iov_len);
  604. pos += vector[i].iov_len;
  605. }
  606. // queue it for transmission
  607. queue_message(tl, slot_id, connection_id, msg);
  608. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  609. return 0;
  610. }
  611. int en50221_tl_new_tc(struct en50221_transport_layer *tl, uint8_t slot_id)
  612. {
  613. // check
  614. if (slot_id >= tl->max_slots) {
  615. tl->error = EN50221ERR_BADSLOTID;
  616. return -1;
  617. }
  618. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  619. if (tl->slots[slot_id].ca_hndl == -1) {
  620. tl->error = EN50221ERR_BADSLOTID;
  621. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  622. return -1;
  623. }
  624. // allocate a new connection if possible
  625. int conid = en50221_tl_alloc_new_tc(tl, slot_id);
  626. if (conid == -1) {
  627. tl->error_slot = slot_id;
  628. tl->error = EN50221ERR_OUTOFCONNECTIONS;
  629. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  630. return -1;
  631. }
  632. // allocate msg structure
  633. struct en50221_message *msg =
  634. malloc(sizeof(struct en50221_message) + 3);
  635. if (msg == NULL) {
  636. tl->error_slot = slot_id;
  637. tl->error = EN50221ERR_OUTOFMEMORY;
  638. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  639. return -1;
  640. }
  641. // make up the data to send
  642. msg->data[0] = T_CREATE_T_C;
  643. msg->data[1] = 1;
  644. msg->data[2] = conid;
  645. msg->length = 3;
  646. msg->next = NULL;
  647. // queue it for transmission
  648. queue_message(tl, slot_id, conid, msg);
  649. // done
  650. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  651. return conid;
  652. }
  653. int en50221_tl_del_tc(struct en50221_transport_layer *tl, uint8_t slot_id,
  654. uint8_t connection_id)
  655. {
  656. // check
  657. if (slot_id >= tl->max_slots) {
  658. tl->error = EN50221ERR_BADSLOTID;
  659. return -1;
  660. }
  661. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  662. if (tl->slots[slot_id].ca_hndl == -1) {
  663. tl->error = EN50221ERR_BADSLOTID;
  664. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  665. return -1;
  666. }
  667. if (connection_id >= tl->max_connections_per_slot) {
  668. tl->error_slot = slot_id;
  669. tl->error = EN50221ERR_BADCONNECTIONID;
  670. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  671. return -1;
  672. }
  673. if (!(tl->slots[slot_id].connections[connection_id].state &
  674. (T_STATE_ACTIVE | T_STATE_IN_DELETION))) {
  675. tl->error_slot = slot_id;
  676. tl->error = EN50221ERR_BADSTATE;
  677. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  678. return -1;
  679. }
  680. // allocate msg structure
  681. struct en50221_message *msg =
  682. malloc(sizeof(struct en50221_message) + 3);
  683. if (msg == NULL) {
  684. tl->error_slot = slot_id;
  685. tl->error = EN50221ERR_OUTOFMEMORY;
  686. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  687. return -1;
  688. }
  689. // make up the data to send
  690. msg->data[0] = T_DELETE_T_C;
  691. msg->data[1] = 1;
  692. msg->data[2] = connection_id;
  693. msg->length = 3;
  694. msg->next = NULL;
  695. // queue it for transmission
  696. queue_message(tl, slot_id, connection_id, msg);
  697. tl->slots[slot_id].connections[connection_id].state =
  698. T_STATE_ACTIVE_DELETEQUEUED;
  699. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  700. return 0;
  701. }
  702. int en50221_tl_get_connection_state(struct en50221_transport_layer *tl,
  703. uint8_t slot_id, uint8_t connection_id)
  704. {
  705. if (slot_id >= tl->max_slots) {
  706. tl->error = EN50221ERR_BADSLOTID;
  707. return -1;
  708. }
  709. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  710. if (tl->slots[slot_id].ca_hndl == -1) {
  711. tl->error = EN50221ERR_BADSLOTID;
  712. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  713. return -1;
  714. }
  715. if (connection_id >= tl->max_connections_per_slot) {
  716. tl->error_slot = slot_id;
  717. tl->error = EN50221ERR_BADCONNECTIONID;
  718. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  719. return -1;
  720. }
  721. int state = tl->slots[slot_id].connections[connection_id].state;
  722. pthread_mutex_unlock(&tl->slots[slot_id].slot_lock);
  723. return state;
  724. }
  725. // ask the module for new data
  726. static int en50221_tl_poll_tc(struct en50221_transport_layer *tl,
  727. uint8_t slot_id, uint8_t connection_id)
  728. {
  729. gettimeofday(&tl->slots[slot_id].connections[connection_id].
  730. tx_time, 0);
  731. // send command
  732. uint8_t hdr[3];
  733. hdr[0] = T_DATA_LAST;
  734. hdr[1] = 1;
  735. hdr[2] = connection_id;
  736. if (dvbca_link_write(tl->slots[slot_id].ca_hndl,
  737. tl->slots[slot_id].slot,
  738. connection_id, hdr, 3) < 0) {
  739. tl->error_slot = slot_id;
  740. tl->error = EN50221ERR_CAWRITE;
  741. return -1;
  742. }
  743. return 0;
  744. }
// handle incoming data
static int en50221_tl_process_data(struct en50221_transport_layer *tl,
				   uint8_t slot_id, uint8_t * data,
				   uint32_t data_length)
{
	int result;

#ifdef DEBUG_RXDATA
	// hex-dump every byte received from the CAM
	printf("-------------------\n");
	uint32_t ii = 0;
	for (ii = 0; ii < data_length; ii++) {
		printf("%02x: %02x\n", ii, data[ii]);
	}
	printf("+++++++++++++++++++\n");
#endif

	// process the received data - the buffer may contain several TPDUs
	// back-to-back, so keep parsing until it is fully consumed
	while (data_length) {
		// parse the header: one tag byte, an ASN.1 length field,
		// then a connection id byte at the start of the payload
		uint8_t tpdu_tag = data[0];
		uint16_t asn_data_length;
		int length_field_len;
		// length_field_len is the number of bytes the length field
		// itself occupies, as reported by asn_1_decode
		if ((length_field_len = asn_1_decode(&asn_data_length, data + 1, data_length - 1)) < 0) {
			print(LOG_LEVEL, ERROR, 1,
			      "Received data with invalid asn from module on slot %02x\n",
			      slot_id);
			tl->error_slot = slot_id;
			tl->error = EN50221ERR_BADCAMDATA;
			return -1;
		}
		// the declared length must cover at least the connection id
		// byte and must fit inside the data we actually received
		if ((asn_data_length < 1) ||
		    (asn_data_length > (data_length - (1 + length_field_len)))) {
			print(LOG_LEVEL, ERROR, 1,
			      "Received data with invalid length from module on slot %02x\n",
			      slot_id);
			tl->error_slot = slot_id;
			tl->error = EN50221ERR_BADCAMDATA;
			return -1;
		}
		// advance past tag + length field + connection id; after this,
		// data/asn_data_length describe the TPDU body only
		uint8_t connection_id = data[1 + length_field_len];
		data += 1 + length_field_len + 1;
		data_length -= (1 + length_field_len + 1);
		asn_data_length--;
		// check the connection_id
		if (connection_id >= tl->max_connections_per_slot) {
			print(LOG_LEVEL, ERROR, 1,
			      "Received bad connection id %02x from module on slot %02x\n",
			      connection_id, slot_id);
			tl->error_slot = slot_id;
			tl->error = EN50221ERR_BADCONNECTIONID;
			return -1;
		}
		// process the TPDUs - dispatch on the tag byte
		switch (tpdu_tag) {
		case T_C_T_C_REPLY:
			// CAM acknowledged a connection we created
			if ((result = en50221_tl_handle_create_tc_reply(tl, slot_id, connection_id)) < 0) {
				return -1;
			}
			break;
		case T_DELETE_T_C:
			// CAM wants a connection torn down
			if ((result = en50221_tl_handle_delete_tc(tl, slot_id, connection_id)) < 0) {
				return -1;
			}
			break;
		case T_D_T_C_REPLY:
			// CAM acknowledged a deletion we requested
			if ((result = en50221_tl_handle_delete_tc_reply(tl, slot_id, connection_id)) < 0) {
				return -1;
			}
			break;
		case T_REQUEST_T_C:
			// CAM asks us to open an additional connection
			if ((result = en50221_tl_handle_request_tc(tl, slot_id, connection_id)) < 0) {
				return -1;
			}
			break;
		case T_DATA_MORE:
			// non-final fragment of a chained message
			if ((result = en50221_tl_handle_data_more(tl, slot_id,
								  connection_id,
								  data,
								  asn_data_length)) < 0) {
				return -1;
			}
			break;
		case T_DATA_LAST:
			// single message, or final fragment of a chain
			if ((result = en50221_tl_handle_data_last(tl, slot_id,
								  connection_id,
								  data,
								  asn_data_length)) < 0) {
				return -1;
			}
			break;
		case T_SB:
			// status byte - tells us whether the CAM has data waiting
			if ((result = en50221_tl_handle_sb(tl, slot_id,
							   connection_id,
							   data,
							   asn_data_length)) < 0) {
				return -1;
			}
			break;
		default:
			print(LOG_LEVEL, ERROR, 1,
			      "Recieved unexpected TPDU tag %02x from module on slot %02x\n",
			      tpdu_tag, slot_id);
			tl->error_slot = slot_id;
			tl->error = EN50221ERR_BADCAMDATA;
			return -1;
		}
		// skip over the consumed data - every handler consumes exactly
		// asn_data_length bytes of body
		data += asn_data_length;
		data_length -= asn_data_length;
	}
	return 0;
}
  855. static int en50221_tl_handle_create_tc_reply(struct en50221_transport_layer
  856. *tl, uint8_t slot_id,
  857. uint8_t connection_id)
  858. {
  859. // set this connection to state active
  860. if (tl->slots[slot_id].connections[connection_id].state == T_STATE_IN_CREATION) {
  861. tl->slots[slot_id].connections[connection_id].state = T_STATE_ACTIVE;
  862. tl->slots[slot_id].connections[connection_id].tx_time.tv_sec = 0;
  863. // tell upper layers
  864. pthread_mutex_lock(&tl->setcallback_lock);
  865. en50221_tl_callback cb = tl->callback;
  866. void *cb_arg = tl->callback_arg;
  867. pthread_mutex_unlock(&tl->setcallback_lock);
  868. if (cb)
  869. cb(cb_arg, T_CALLBACK_REASON_CONNECTIONOPEN, NULL, 0, slot_id, connection_id);
  870. } else {
  871. print(LOG_LEVEL, ERROR, 1,
  872. "Received T_C_T_C_REPLY for connection not in "
  873. "T_STATE_IN_CREATION from module on slot %02x\n",
  874. slot_id);
  875. tl->error_slot = slot_id;
  876. tl->error = EN50221ERR_BADCAMDATA;
  877. return -1;
  878. }
  879. return 0;
  880. }
  881. static int en50221_tl_handle_delete_tc(struct en50221_transport_layer *tl,
  882. uint8_t slot_id,
  883. uint8_t connection_id)
  884. {
  885. // immediately delete this connection and send D_T_C_REPLY
  886. if (tl->slots[slot_id].connections[connection_id].state &
  887. (T_STATE_ACTIVE | T_STATE_IN_DELETION)) {
  888. // clear down the slot
  889. tl->slots[slot_id].connections[connection_id].state = T_STATE_IDLE;
  890. if (tl->slots[slot_id].connections[connection_id].chain_buffer) {
  891. free(tl->slots[slot_id].connections[connection_id].chain_buffer);
  892. }
  893. tl->slots[slot_id].connections[connection_id].chain_buffer = NULL;
  894. tl->slots[slot_id].connections[connection_id].buffer_length = 0;
  895. // send the reply
  896. uint8_t hdr[3];
  897. hdr[0] = T_D_T_C_REPLY;
  898. hdr[1] = 1;
  899. hdr[2] = connection_id;
  900. if (dvbca_link_write(tl->slots[slot_id].ca_hndl,
  901. tl->slots[slot_id].slot,
  902. connection_id, hdr, 3) < 0) {
  903. tl->error_slot = slot_id;
  904. tl->error = EN50221ERR_CAWRITE;
  905. return -1;
  906. }
  907. // tell upper layers
  908. pthread_mutex_lock(&tl->setcallback_lock);
  909. en50221_tl_callback cb = tl->callback;
  910. void *cb_arg = tl->callback_arg;
  911. pthread_mutex_unlock(&tl->setcallback_lock);
  912. if (cb)
  913. cb(cb_arg, T_CALLBACK_REASON_CONNECTIONCLOSE, NULL, 0, slot_id, connection_id);
  914. } else {
  915. print(LOG_LEVEL, ERROR, 1,
  916. "Received T_DELETE_T_C for inactive connection from module on slot %02x\n",
  917. slot_id);
  918. tl->error_slot = slot_id;
  919. tl->error = EN50221ERR_BADCAMDATA;
  920. return -1;
  921. }
  922. return 0;
  923. }
  924. static int en50221_tl_handle_delete_tc_reply(struct en50221_transport_layer
  925. *tl, uint8_t slot_id,
  926. uint8_t connection_id)
  927. {
  928. // delete this connection, should be in T_STATE_IN_DELETION already
  929. if (tl->slots[slot_id].connections[connection_id].state == T_STATE_IN_DELETION) {
  930. tl->slots[slot_id].connections[connection_id].state = T_STATE_IDLE;
  931. } else {
  932. print(LOG_LEVEL, ERROR, 1,
  933. "Received T_D_T_C_REPLY received for connection not in "
  934. "T_STATE_IN_DELETION from module on slot %02x\n",
  935. slot_id);
  936. tl->error_slot = slot_id;
  937. tl->error = EN50221ERR_BADCAMDATA;
  938. return -1;
  939. }
  940. return 0;
  941. }
  942. static int en50221_tl_handle_request_tc(struct en50221_transport_layer *tl,
  943. uint8_t slot_id,
  944. uint8_t connection_id)
  945. {
  946. // allocate a new connection if possible
  947. int conid = en50221_tl_alloc_new_tc(tl, slot_id);
  948. int ca_hndl = tl->slots[slot_id].ca_hndl;
  949. if (conid == -1) {
  950. print(LOG_LEVEL, ERROR, 1,
  951. "Too many connections requested by module on slot %02x\n",
  952. slot_id);
  953. // send the error
  954. uint8_t hdr[4];
  955. hdr[0] = T_T_C_ERROR;
  956. hdr[1] = 2;
  957. hdr[2] = connection_id;
  958. hdr[3] = 1;
  959. if (dvbca_link_write(ca_hndl, tl->slots[slot_id].slot, connection_id, hdr, 4) < 0) {
  960. tl->error_slot = slot_id;
  961. tl->error = EN50221ERR_CAWRITE;
  962. return -1;
  963. }
  964. tl->slots[slot_id].connections[connection_id].tx_time.
  965. tv_sec = 0;
  966. } else {
  967. // send the NEW_T_C on the connection we received it on
  968. uint8_t hdr[4];
  969. hdr[0] = T_NEW_T_C;
  970. hdr[1] = 2;
  971. hdr[2] = connection_id;
  972. hdr[3] = conid;
  973. if (dvbca_link_write(ca_hndl, tl->slots[slot_id].slot, connection_id, hdr, 4) < 0) {
  974. tl->slots[slot_id].connections[conid].state = T_STATE_IDLE;
  975. tl->error_slot = slot_id;
  976. tl->error = EN50221ERR_CAWRITE;
  977. return -1;
  978. }
  979. tl->slots[slot_id].connections[connection_id].tx_time.tv_sec = 0;
  980. // send the CREATE_T_C on the new connnection
  981. hdr[0] = T_CREATE_T_C;
  982. hdr[1] = 1;
  983. hdr[2] = conid;
  984. if (dvbca_link_write(ca_hndl, tl->slots[slot_id].slot, conid, hdr, 3) < 0) {
  985. tl->slots[slot_id].connections[conid].state = T_STATE_IDLE;
  986. tl->error_slot = slot_id;
  987. tl->error = EN50221ERR_CAWRITE;
  988. return -1;
  989. }
  990. gettimeofday(&tl->slots[slot_id].connections[conid].tx_time, 0);
  991. // tell upper layers
  992. pthread_mutex_lock(&tl->setcallback_lock);
  993. en50221_tl_callback cb = tl->callback;
  994. void *cb_arg = tl->callback_arg;
  995. pthread_mutex_unlock(&tl->setcallback_lock);
  996. if (cb)
  997. cb(cb_arg, T_CALLBACK_REASON_CAMCONNECTIONOPEN, NULL, 0, slot_id, conid);
  998. }
  999. return 0;
  1000. }
  1001. static int en50221_tl_handle_data_more(struct en50221_transport_layer *tl,
  1002. uint8_t slot_id,
  1003. uint8_t connection_id,
  1004. uint8_t * data,
  1005. uint32_t data_length)
  1006. {
  1007. // connection in correct state?
  1008. if (tl->slots[slot_id].connections[connection_id].state != T_STATE_ACTIVE) {
  1009. print(LOG_LEVEL, ERROR, 1,
  1010. "Received T_DATA_MORE for connection not in "
  1011. "T_STATE_ACTIVE from module on slot %02x\n",
  1012. slot_id);
  1013. tl->error_slot = slot_id;
  1014. tl->error = EN50221ERR_BADCAMDATA;
  1015. return -1;
  1016. }
  1017. // a chained data packet is coming in, save
  1018. // it to the buffer and wait for more
  1019. tl->slots[slot_id].connections[connection_id].tx_time.tv_sec = 0;
  1020. int new_data_length =
  1021. tl->slots[slot_id].connections[connection_id].buffer_length + data_length;
  1022. uint8_t *new_data_buffer =
  1023. realloc(tl->slots[slot_id].connections[connection_id].chain_buffer, new_data_length);
  1024. if (new_data_buffer == NULL) {
  1025. tl->error_slot = slot_id;
  1026. tl->error = EN50221ERR_OUTOFMEMORY;
  1027. return -1;
  1028. }
  1029. tl->slots[slot_id].connections[connection_id].chain_buffer = new_data_buffer;
  1030. memcpy(tl->slots[slot_id].connections[connection_id].chain_buffer +
  1031. tl->slots[slot_id].connections[connection_id].buffer_length,
  1032. data, data_length);
  1033. tl->slots[slot_id].connections[connection_id].buffer_length = new_data_length;
  1034. return 0;
  1035. }
  1036. static int en50221_tl_handle_data_last(struct en50221_transport_layer *tl,
  1037. uint8_t slot_id,
  1038. uint8_t connection_id,
  1039. uint8_t * data,
  1040. uint32_t data_length)
  1041. {
  1042. // connection in correct state?
  1043. if (tl->slots[slot_id].connections[connection_id].state != T_STATE_ACTIVE) {
  1044. print(LOG_LEVEL, ERROR, 1,
  1045. "Received T_DATA_LAST received for connection not in "
  1046. "T_STATE_ACTIVE from module on slot %02x\n",
  1047. slot_id);
  1048. tl->error_slot = slot_id;
  1049. tl->error = EN50221ERR_BADCAMDATA;
  1050. return -1;
  1051. }
  1052. // last package of a chain or single package comes in
  1053. tl->slots[slot_id].connections[connection_id].tx_time.tv_sec = 0;
  1054. if (tl->slots[slot_id].connections[connection_id].chain_buffer == NULL) {
  1055. // single package => dispatch immediately
  1056. pthread_mutex_lock(&tl->setcallback_lock);
  1057. en50221_tl_callback cb = tl->callback;
  1058. void *cb_arg = tl->callback_arg;
  1059. pthread_mutex_unlock(&tl->setcallback_lock);
  1060. if (cb && data_length) {
  1061. pthread_mutex_unlock(&tl->slots[slot_id].
  1062. slot_lock);
  1063. cb(cb_arg, T_CALLBACK_REASON_DATA, data, data_length, slot_id, connection_id);
  1064. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  1065. }
  1066. } else {
  1067. int new_data_length =
  1068. tl->slots[slot_id].connections[connection_id].buffer_length + data_length;
  1069. uint8_t *new_data_buffer =
  1070. realloc(tl->slots[slot_id].connections[connection_id].chain_buffer, new_data_length);
  1071. if (new_data_buffer == NULL) {
  1072. tl->error_slot = slot_id;
  1073. tl->error = EN50221ERR_OUTOFMEMORY;
  1074. return -1;
  1075. }
  1076. memcpy(new_data_buffer +
  1077. tl->slots[slot_id].connections[connection_id].
  1078. buffer_length, data, data_length);
  1079. // clean the buffer position
  1080. tl->slots[slot_id].connections[connection_id].chain_buffer = NULL;
  1081. tl->slots[slot_id].connections[connection_id].buffer_length = 0;
  1082. // tell the upper layers
  1083. pthread_mutex_lock(&tl->setcallback_lock);
  1084. en50221_tl_callback cb = tl->callback;
  1085. void *cb_arg = tl->callback_arg;
  1086. pthread_mutex_unlock(&tl->setcallback_lock);
  1087. if (cb && data_length) {
  1088. pthread_mutex_unlock(&tl->slots[slot_id].
  1089. slot_lock);
  1090. cb(cb_arg, T_CALLBACK_REASON_DATA, new_data_buffer,
  1091. new_data_length, slot_id, connection_id);
  1092. pthread_mutex_lock(&tl->slots[slot_id].slot_lock);
  1093. }
  1094. free(new_data_buffer);
  1095. }
  1096. return 0;
  1097. }
  1098. static int en50221_tl_handle_sb(struct en50221_transport_layer *tl,
  1099. uint8_t slot_id, uint8_t connection_id,
  1100. uint8_t * data, uint32_t data_length)
  1101. {
  1102. // is the connection id ok?
  1103. if (tl->slots[slot_id].connections[connection_id].state != T_STATE_ACTIVE) {
  1104. print(LOG_LEVEL, ERROR, 1,
  1105. "Received T_SB for connection not in T_STATE_ACTIVE from module on slot %02x\n",
  1106. slot_id);
  1107. tl->error_slot = slot_id;
  1108. tl->error = EN50221ERR_BADCAMDATA;
  1109. return -1;
  1110. }
  1111. // did we get enough data in the T_SB?
  1112. if (data_length != 1) {
  1113. print(LOG_LEVEL, ERROR, 1,
  1114. "Recieved T_SB with invalid length from module on slot %02x\n",
  1115. slot_id);
  1116. tl->error_slot = slot_id;
  1117. tl->error = EN50221ERR_BADCAMDATA;
  1118. return -1;
  1119. }
  1120. // tell it to send the data if it says there is some
  1121. if (data[0] & 0x80) {
  1122. int ca_hndl = tl->slots[slot_id].ca_hndl;
  1123. // send the RCV
  1124. uint8_t hdr[3];
  1125. hdr[0] = T_RCV;
  1126. hdr[1] = 1;
  1127. hdr[2] = connection_id;
  1128. if (dvbca_link_write(ca_hndl, tl->slots[slot_id].slot, connection_id, hdr, 3) < 0) {
  1129. tl->error_slot = slot_id;
  1130. tl->error = EN50221ERR_CAWRITE;
  1131. return -1;
  1132. }
  1133. gettimeofday(&tl->slots[slot_id].connections[connection_id].tx_time, 0);
  1134. } else {
  1135. // no data - indicate not waiting for anything now
  1136. tl->slots[slot_id].connections[connection_id].tx_time.tv_sec = 0;
  1137. }
  1138. return 0;
  1139. }
  1140. static int en50221_tl_alloc_new_tc(struct en50221_transport_layer *tl,
  1141. uint8_t slot_id)
  1142. {
  1143. // we browse through the array of connection
  1144. // types, to look for the first unused one
  1145. int i, conid = -1;
  1146. for (i = 1; i < tl->max_connections_per_slot; i++) {
  1147. if (tl->slots[slot_id].connections[i].state == T_STATE_IDLE) {
  1148. conid = i;
  1149. break;
  1150. }
  1151. }
  1152. if (conid == -1) {
  1153. print(LOG_LEVEL, ERROR, 1,
  1154. "CREATE_T_C failed: no more connections available\n");
  1155. return -1;
  1156. }
  1157. // set up the connection struct
  1158. tl->slots[slot_id].connections[conid].state = T_STATE_IN_CREATION;
  1159. tl->slots[slot_id].connections[conid].chain_buffer = NULL;
  1160. tl->slots[slot_id].connections[conid].buffer_length = 0;
  1161. return conid;
  1162. }
  1163. static void queue_message(struct en50221_transport_layer *tl,
  1164. uint8_t slot_id, uint8_t connection_id,
  1165. struct en50221_message *msg)
  1166. {
  1167. msg->next = NULL;
  1168. if (tl->slots[slot_id].connections[connection_id].send_queue_tail) {
  1169. tl->slots[slot_id].connections[connection_id].send_queue_tail->next = msg;
  1170. tl->slots[slot_id].connections[connection_id].send_queue_tail = msg;
  1171. } else {
  1172. tl->slots[slot_id].connections[connection_id].send_queue = msg;
  1173. tl->slots[slot_id].connections[connection_id].send_queue_tail = msg;
  1174. }
  1175. }