can_eth_gw Gateway Module  0.1
A bidirectional CAN to Ethernet Gateway (Kernel Module)
 All Data Structures Files Functions Variables Enumerations Enumerator Macros Groups Pages
ce_gw_dev.c
Go to the documentation of this file.
1 
11 /*****************************************************************************
12  * (C) Copyright 2013 Fabian Raab, Stefan Smarzly
13  *
14  * This file is part of CAN-Eth-GW.
15  *
16  * CAN-Eth-GW is free software: you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation, either version 3 of the License, or
19  * (at your option) any later version.
20  *
21  * CAN-Eth-GW is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with CAN-Eth-GW. If not, see <http://www.gnu.org/licenses/>.
28  *****************************************************************************/
29 
30 #include <linux/version.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/list.h>
34 
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
38 # include <uapi/linux/can.h>
39 # else
40 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
41 # include <linux/can.h>
42 # else
43 # error Only Linux Kernel 3.6 and above are supported
44 # endif
45 #endif
46 #include "ce_gw_dev.h"
47 #include "ce_gw_main.h"
48 
49 #include <asm-generic/errno-base.h>
50 #include <asm-generic/errno.h>
51 
/* List of every net_device this module has allocated (RCU hlist). */
HLIST_HEAD(ce_gw_dev_allocated);
/* Subset of the allocated devices that are currently registered. */
HLIST_HEAD(ce_gw_dev_registered);
/* Slab cache backing struct ce_gw_dev_list entries. */
static struct kmem_cache *ce_gw_dev_cache __read_mostly;
	/* NOTE(review): the opening line of this struct (ce_gw_dev_list)
	 * and its kernel-doc header lie outside this view. */
	struct hlist_node list_alloc; /* anchor in ce_gw_dev_allocated */
	struct hlist_node list_reg;   /* anchor in ce_gw_dev_registered */
	struct rcu_head rcu;          /* presumably for deferred RCU free — confirm */
	struct net_device *dev;       /* the device this entry tracks */
};
68 
77 int ce_gw_dev_open(struct net_device *dev)
78 {
79  printk("ce_gw: ce_gw_open called\n");
80 
81  if (!netif_device_present(dev)) {
82  pr_err("ce_gw_dev_open: Device not registered");
83  return -1;
84  }
85 
86  netif_start_queue(dev);
87  return 0;
88 }
89 
/**
 * ce_gw_dev_stop - ndo_stop hook: stop the TX queue on ifdown.
 * @dev: the net_device being stopped.
 *
 * Return: always 0.
 */
int ce_gw_dev_stop(struct net_device *dev)
{
	/* bare printk() had no KERN_ level; use pr_debug for a trace message */
	pr_debug("ce_gw_dev: ce_gw_release called\n");
	netif_stop_queue(dev);
	return 0;
}
104 
115 static int ce_gw_dev_start_xmit(struct sk_buff *skb,
116  struct net_device *dev)
117 {
118  printk ("ce_gw_dev: dummy xmit function called....\n");
119  /* TODO: Get right gw_job and push it to eth_skb (instead of NULL) */
120  struct ce_gw_job_info *priv = netdev_priv(dev);
121 
122  struct ce_gw_job *job = NULL;
123  struct hlist_node *node;
124 
125 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
126  hlist_for_each_entry_safe(job, node, &priv->job_src, list_dev) {
127 
128 # else
129  struct hlist_node *pos;
130  hlist_for_each_entry_safe(job, pos, node, &priv->job_src,
131  list_dev) {
132 # endif
133  ce_gw_eth_rcv(skb, job);
134  }
135 
136  dev_kfree_skb(skb);
137 
138  /*here is my test for ce_gw_eth_to_canfd*/
139  /* struct sk_buff *can_skb;*/
140  /* __u32 id = 0xF65C034B;*/
141  /* __u8 flags = 0x04;*/
142  /* __u8 res0 = 0xF3;*/
143  /* __u8 res1 = 0x00;*/
144  /* can_skb = ce_gw_eth_to_canfd(id, flags, res0, res1, skb, dev);*/
145  return 0;
146 }
147 
/**
 * ce_gw_dev_init - ndo_init hook, invoked from register_netdev().
 * @dev: the net_device being registered.
 *
 * Nothing to initialise yet. Return: always 0 (success).
 */
int ce_gw_dev_init(struct net_device *dev) {
	/* bare printk() had no KERN_ level; trace messages belong at debug */
	pr_debug("ce_gw_dev: device init called\n");
	return 0;
}
160 
164 static struct net_device_ops ce_gw_ops = {
165  .ndo_init = ce_gw_dev_init,
166  .ndo_open = ce_gw_dev_open,
167  .ndo_stop = ce_gw_dev_stop,
168  .ndo_start_xmit = ce_gw_dev_start_xmit,
169  0
170 };
171 
/**
 * ce_gw_is_allocated_dev - test whether @eth_dev was allocated by this module.
 * @eth_dev: net_device to look up.
 *
 * Walks the internal ce_gw_dev_allocated list.
 * Return: 0 if the device is on the list, -ENODEV otherwise.
 */
int ce_gw_is_allocated_dev(struct net_device *eth_dev) {

	struct ce_gw_dev_list *dl = NULL;
	struct hlist_node *node;

	/* the two branches cover the hlist API change in kernel 3.9 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
	hlist_for_each_entry_safe(dl, node, &ce_gw_dev_allocated, list_alloc) {

# else
	struct hlist_node *pos;
	hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_allocated,
	                          list_alloc) {
# endif
		if (dl->dev == eth_dev)
			return 0;
	}

	return -ENODEV;
}
191 
/**
 * ce_gw_is_registered_dev - test whether @eth_dev is registered by this module.
 * @eth_dev: net_device to look up.
 *
 * Walks the internal ce_gw_dev_registered list.
 * Return: 0 if the device is on the list, -ENODEV otherwise.
 */
int ce_gw_is_registered_dev(struct net_device *eth_dev) {

	struct ce_gw_dev_list *dl = NULL;
	struct hlist_node *node;

	/* the two branches cover the hlist API change in kernel 3.9 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
	hlist_for_each_entry_safe(dl, node, &ce_gw_dev_registered, list_reg) {

# else
	struct hlist_node *pos;
	hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_registered,
	                          list_reg) {
# endif
		if (dl->dev == eth_dev)
			return 0;
	}

	pr_debug("ce_gw_is_registered_dev: Device not registered\n");
	return -ENODEV;
}
212 
213 int ce_gw_has_min_mtu(struct net_device *dev, enum ce_gw_type type, u32 flags) {
214 
215  int mtu = ETH_DATA_LEN; /* Standart Ethernet Value */
216 
217  switch (type) {
218  case CE_GW_TYPE_NONE:
219  /* Do Nothing (default Ethernet MTU will be set) */
220  break;
221  case CE_GW_TYPE_ETH:
222  if ((flags & CE_GW_F_CAN_FD) == CE_GW_F_CAN_FD) {
223  mtu = CANFD_MAX_DLEN;
224  } else {
225  mtu = CAN_MAX_DLEN;
226  }
227  break;
228  case CE_GW_TYPE_NET:
229  if ((flags & CE_GW_F_CAN_FD) == CE_GW_F_CAN_FD) {
230  mtu = sizeof(struct canfd_frame);
231  } else {
232  mtu = sizeof(struct can_frame);
233  }
234  break;
235  case CE_GW_TYPE_TCP:
236  /* TODO nothing yet */
237  break;
238  case CE_GW_TYPE_UDP:
239  /* TODO nothing yet */
240  break;
241  default:
242  pr_err("ce_gw_dev: Type not defined.");
243  }
244 
245  if (dev->mtu >= mtu) {
246  return true;
247  }
248 
249  return false;
250 }
251 
252 void ce_gw_dev_job_src_add(struct ce_gw_job *job) {
253  struct ce_gw_job_info *priv = netdev_priv(job->src.dev);
254  hlist_add_head_rcu(&job->list_dev, &priv->job_src);
255 }
256 
257 void ce_gw_dev_job_dst_add(struct ce_gw_job *job) {
258  struct ce_gw_job_info *priv = netdev_priv(job->dst.dev);
259  hlist_add_head_rcu(&job->list_dev, &priv->job_dst);
260 }
261 
262 
263 int ce_gw_dev_job_add(struct net_device *eth_dev, struct ce_gw_job *job) {
264  if (job->src.dev == eth_dev) {
266  } else if (job->dst.dev == eth_dev) {
268  } else {
269  pr_err("ce_gw_dev_job_add: Invalid Arguments");
270  return -1;
271  }
272  return 0;
273 }
274 
/**
 * ce_gw_dev_job_remove - unlink @job from whichever device job list holds it.
 * @job: the gateway job to unlink.
 *
 * RCU deletion: concurrent readers may still observe the entry until a
 * grace period elapses.
 */
void ce_gw_dev_job_remove(struct ce_gw_job *job) {
	hlist_del_rcu(&job->list_dev);
}
278 
279 struct net_device *ce_gw_dev_alloc(char *dev_name) {
280  pr_debug("ce_gw_dev: Alloc Device\n");
281  struct net_device *dev;
282 
283  dev = alloc_netdev(sizeof(struct ce_gw_job_info),
284  dev_name, ether_setup);
285  if (dev == NULL) {
286  pr_err("ce_gw_dev: Error allocation etherdev.");
287  goto ce_gw_dev_create_error;
288  }
289 
290  /* initialize private field */
291  struct ce_gw_job_info *priv = netdev_priv(dev);
292  memset(priv, 0, sizeof(struct ce_gw_job_info));
293  priv->job_src.first = NULL;
294  priv->job_dst.first = NULL;
295 
296  /* create list entry and add */
297  struct ce_gw_dev_list *dl;
298  dl = kmem_cache_alloc(ce_gw_dev_cache, GFP_KERNEL);
299  if (dl == NULL) {
300  pr_err("ce_gw_dev: cache alloc failed");
301  goto ce_gw_dev_create_error_cache;
302  }
303 
304  dl->dev = dev;
305 
306  hlist_add_head_rcu(&dl->list_alloc, &ce_gw_dev_allocated);
307 
308  return dev;
309 
310 ce_gw_dev_create_error_cache:
311  kmem_cache_free(ce_gw_dev_cache, dl);
312 
313 ce_gw_dev_create_error:
314  free_netdev(dev);
315  return NULL;
316 }
317 
318 void ce_gw_dev_free(struct net_device *eth_dev) {
319  pr_debug("ce_gw_dev: Free Device %s\n", eth_dev->name);
320 
321  struct ce_gw_dev_list *dl = NULL;
322  struct hlist_node *node;
323 
324  /* search for the List Element of eth_dev */
325 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
326  hlist_for_each_entry_safe(dl, node, &ce_gw_dev_allocated, list_alloc) {
327 
328 # else
329  struct hlist_node *pos;
330  hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_allocated,
331  list_alloc) {
332 # endif
333  if (dl->dev == eth_dev)
334  break;
335  }
336 
337  if (dl == NULL || dl->dev != eth_dev) {
338  pr_err("ce_gw_dev: Device not found in list\n");
339  } else {
340  hlist_del_rcu(&dl->list_alloc);
341  }
342 
343  free_netdev(eth_dev);
344  kmem_cache_free(ce_gw_dev_cache, dl);
345 }
346 
347 void ce_gw_dev_setup(struct net_device *dev, enum ce_gw_type type,
348  __u32 flags) {
349  dev->netdev_ops = &ce_gw_ops;
350 
351  /* Set sensible MTU */
352  switch (type) {
353  case CE_GW_TYPE_NONE:
354  /* Do Nothing (default Ethernet MTU will be set) */
355  break;
356  case CE_GW_TYPE_ETH:
357  if ((flags & CE_GW_F_CAN_FD) == CE_GW_F_CAN_FD) {
358  dev->mtu = CANFD_MAX_DLEN;
359  } else {
360  dev->mtu = CAN_MAX_DLEN;
361  }
362  break;
363  case CE_GW_TYPE_NET:
364  if ((flags & CE_GW_F_CAN_FD) == CE_GW_F_CAN_FD) {
365  dev->mtu = sizeof(struct canfd_frame);
366  } else {
367  dev->mtu = sizeof(struct can_frame);
368  }
369  break;
370  case CE_GW_TYPE_TCP:
371  /* TODO nothing yet */
372  break;
373  case CE_GW_TYPE_UDP:
374  /* TODO nothing yet */
375  break;
376  default:
377  pr_err("ce_gw_dev: Type not defined.");
378  }
379 }
380 
381 struct net_device *ce_gw_dev_create(enum ce_gw_type type, __u32 flags,
382  char *dev_name) {
383  struct net_device *dev;
384 
385  dev = ce_gw_dev_alloc(dev_name);
386 
387  ce_gw_dev_setup(dev, type, flags);
388 
389  return dev;
390 }
391 
392 int ce_gw_dev_register(struct net_device *eth_dev) {
393  pr_debug("ce_gw_dev: Register Device\n");
394  int err = 0;
395  err = register_netdev(eth_dev);
396 
397  struct ce_gw_dev_list *dl = NULL;
398  struct hlist_node *node;
399 
400  /* search for the List Element of eth_dev */
401 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
402  hlist_for_each_entry_safe(dl, node, &ce_gw_dev_allocated, list_alloc) {
403 
404 # else
405  struct hlist_node *pos;
406  hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_allocated,
407  list_alloc) {
408 # endif
409  if (dl->dev == eth_dev)
410  break;
411  }
412 
413  if (dl == NULL || dl->dev != eth_dev) {
414  pr_err("ce_gw_dev: Device not found in list\n");
415  goto ce_gw_dev_register_error;
416  }
417 
418  hlist_add_head_rcu(&dl->list_reg, &ce_gw_dev_registered);
419  if (&dl->list_reg == NULL) {
420  pr_err("ce_gw_dev: Device not add to list correct\n");
421  }
422 
423  return err;
424 
425 ce_gw_dev_register_error:
426  unregister_netdev(eth_dev);
427  return err;
428 }
429 
430 void ce_gw_dev_unregister(struct net_device *eth_dev) {
431  pr_debug("ce_gw_dev: Unregister Device %s\n", eth_dev->name);
432  int err = 0;
433 
434  struct ce_gw_job_info *priv = netdev_priv(eth_dev);
435  pr_debug("ce_gw_dev: Deleting all Routes of %s\n", eth_dev->name);
436 
437  /* Delete all routes witch are linked to the soon unregistered device
438  * where the device is the source */
439  struct ce_gw_job *job = NULL;
440  struct hlist_node *node;
441 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
442  hlist_for_each_entry_safe(job, node, &priv->job_src, list_dev) {
443 
444 # else
445  struct hlist_node *pos;
446  hlist_for_each_entry_safe(job, pos, node, &priv->job_src, list_dev) {
447 # endif
448 
449  err = ce_gw_remove_route(job->id);
450  if (err != 0) {
451  pr_err("ce_gw_dev: route with id %u "
452  "deleting failed: %d", job->id, err);
453  }
454  }
455 
456  /* Delete all routes witch are linked to the soon unregistered device
457  * where the device is the dest */
458  job = NULL;
459  node = NULL;
460 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
461  hlist_for_each_entry_safe(job, node, &priv->job_dst, list_dev) {
462 
463 # else
464  pos = NULL;
465  hlist_for_each_entry_safe(job, pos, node, &priv->job_dst, list_dev) {
466 # endif
467 
468  err = ce_gw_remove_route(job->id);
469  {
470  pr_err("ce_gw_dev: route with id %u "
471  "deleting failed: %d", job->id, err);
472  }
473  }
474 
475  /* unregister */
476  pr_debug("ce_gw_dev: Call unregister_netdev() of %s\n", eth_dev->name);
477  unregister_netdev(eth_dev);
478 
479  struct ce_gw_dev_list *dl = NULL;
480  node = NULL;
481 
482  /* search for the List Element of eth_dev */
483 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
484  hlist_for_each_entry_safe(dl, node, &ce_gw_dev_registered, list_reg) {
485 
486 # else
487  pos = NULL;
488  hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_registered,
489  list_reg) {
490 # endif
491  if (dl->dev == eth_dev)
492  break;
493  }
494 
495  if (dl == NULL || &dl->list_reg == NULL) {
496  pr_err("ce_gw_dev: Device not correct in internal list\n");
497  } else {
498  hlist_del_rcu(&dl->list_reg);
499  }
500 }
501 
502 
	/* NOTE(review): the function header (presumably
	 * "int ce_gw_dev_init_module(void) {") lies outside this view. */
	/* Slab cache for the per-device tracking entries; destroyed again
	 * in ce_gw_dev_cleanup(). */
	ce_gw_dev_cache = kmem_cache_create("can_eth_gw_dev",
	                                    sizeof(struct ce_gw_dev_list),
	                                    0, 0, NULL);
	if (!ce_gw_dev_cache)
		return -ENOMEM;

	return 0;
}
512 
513 void ce_gw_dev_cleanup(void) {
514  struct ce_gw_dev_list *dl = NULL;
515  struct hlist_node *node;
516 
517  /* iterate over list and unregister */
518 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
519  hlist_for_each_entry_safe(dl, node, &ce_gw_dev_registered, list_reg) {
520 
521 # else
522  struct hlist_node *pos;
523  hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_registered,
524  list_reg) {
525 # endif
527  }
528 
529  dl = NULL;
530  node = NULL;
531 
532  /* iterate over list and free */
533 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
534  hlist_for_each_entry_safe(dl, node, &ce_gw_dev_allocated, list_alloc) {
535 
536 # else
537  pos = NULL;
538  hlist_for_each_entry_safe(dl, pos, node, &ce_gw_dev_allocated,
539  list_alloc) {
540 # endif
541  ce_gw_dev_free(dl->dev);
542  }
543 
544  kmem_cache_destroy(ce_gw_dev_cache);
545 }
546