Distributed IoT references
@INPROCEEDINGS{nearcloud,
author={J. P. Talusan and Y. Nakamura and T. Mizumoto and K. Yasumoto},
booktitle={2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)},
title={Near Cloud: Low-cost Low-Power Cloud Implementation for Rural Area Connectivity and Data Processing},
year={2018},
volume={02},
number={},
pages={622-627},
abstract={Information and communication technologies (ICTs) has enabled growth in developed countries and urban cities through improvements in communication systems, devices and applications. In rural areas, especially in developing countries, ICT penetration is not as high, often due to lack of available infrastructure and funding. With the increasing availability of Internet-of-Things (IoT) devices, low-cost large-scale deployments have become possible even in rural areas. We design, develop and implement, Near Cloud, a cloud-less platform that allows users and IoT devices to communicate and share information. This is built on top of a wireless mesh network (WMN) of low-cost, low-power IoT devices and deployed in areas where there is little to no Internet connectivity. To inject ICT and help bridge the digital divide in rural areas, Near Cloud provides functionalities such as web servers on nodes, accessibility to all users via Wi-Fi, and various data processing including image processing and machine learning. We will show applicability of Near Cloud in improving rural education, health care facilities, disaster response and agriculture.},
keywords={cloud computing;Internet of Things;wireless mesh networks;cloud-less platform;low-power IoT devices;Internet connectivity;rural areas;data processing;rural education;rural area connectivity;ICT penetration;Internet-of-Things devices;information and communication technologies;low-cost low-power cloud implementation;wireless mesh network;WMN;Conferences;Software;cloud less platform;wireless mesh network;rural connectivity},
doi={10.1109/COMPSAC.2018.10307},
ISSN={0730-3157},
month={July},}
@ARTICLE{deeplearning_food,
author={C. Liu and Y. Cao and Y. Luo and G. Chen and V. Vokkarane and M. Yunsheng and S. Chen and P. Hou},
journal={IEEE Transactions on Services Computing},
title={A New Deep Learning-Based Food Recognition System for Dietary Assessment on An Edge Computing Service Infrastructure},
year={2018},
volume={11},
number={2},
pages={249-261},
abstract={Literature has indicated that accurate dietary assessment is very important for assessing the effectiveness of weight loss interventions. However, most of the existing dietary assessment methods rely on memory. With the help of pervasive mobile devices and rich cloud services, it is now possible to develop new computer-aided food recognition system for accurate dietary assessment. However, enabling this future Internet of Things-based dietary assessment imposes several fundamental challenges on algorithm development and system design. In this paper, we set to address these issues from the following two aspects: (1) to develop novel deep learning-based visual food recognition algorithms to achieve the best-in-class recognition accuracy; (2) to design a food recognition system employing edge computing-based service computing paradigm to overcome some inherent problems of traditional mobile cloud computing paradigm, such as unacceptable system latency and low battery life of mobile devices. We have conducted extensive experiments with real-world data. Our results have shown that the proposed system achieved three objectives: (1) outperforming existing work in terms of food recognition accuracy; (2) reducing response time that is equivalent to the minimum of the existing approaches; and (3) lowering energy consumption which is close to the minimum of the state-of-the-art.},
keywords={cloud computing;image recognition;learning (artificial intelligence);mobile computing;mobile radio;food recognition system;edge computing service infrastructure;accurate dietary assessment;existing dietary assessment methods;pervasive mobile devices;rich cloud services;visual food recognition algorithms;best-in-class recognition accuracy;service computing paradigm;traditional mobile cloud computing paradigm;unacceptable system;food recognition accuracy;deep learning;Algorithm design and analysis;Machine learning;Edge computing;Mobile communication;Mobile handsets;Time factors;Image recognition;Mobile applications;object recognition;deep learning;edge computing;food recognition},
doi={10.1109/TSC.2017.2662008},
ISSN={1939-1374},
month={March},}
@ARTICLE{learning_iot_deep,
author={H. Li and K. Ota and M. Dong},
journal={IEEE Network},
title={Learning IoT in Edge: Deep Learning for the Internet of Things with Edge Computing},
year={2018},
volume={32},
number={1},
pages={96-101},
abstract={Deep learning is a promising approach for extracting accurate information from raw sensor data from IoT devices deployed in complex environments. Because of its multilayer structure, deep learning is also appropriate for the edge computing environment. Therefore, in this article, we first introduce deep learning for IoTs into the edge computing environment. Since existing edge nodes have limited processing capability, we also design a novel offloading strategy to optimize the performance of IoT deep learning applications with edge computing. In the performance evaluation, we test the performance of executing multiple deep learning tasks in an edge computing environment with our strategy. The evaluation results show that our method outperforms other optimization solutions on deep learning for IoT.},
keywords={cloud computing;Internet of Things;learning (artificial intelligence);edge computing environment;IoT deep learning applications;deep learning tasks;Internet of Things;Machine learning;Edge computing;Cloud computing;Feature extraction;Task analysis;Computational modeling;Servers},
doi={10.1109/MNET.2018.1700202},
ISSN={0890-8044},
month={Jan},}
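% The offloading idea in learning_iot_deep (run the lower layers of a deep model on the edge
% node and ship the intermediate features onward) can be pictured with a minimal sketch.
% This is only my illustration with numpy and a hypothetical split point, not the paper's
% actual network or offloading strategy.
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

# Hypothetical 4-layer fully connected model; weights would normally be trained offline.
rng = np.random.default_rng(0)
weights = [rng.standard_normal((64, 64)) * 0.1 for _ in range(4)]
split = 2  # layers [0, split) run on the edge device, the rest "in the cloud"

def run_on_edge(x):
    for w in weights[:split]:
        x = relu(x @ w)
    return x  # intermediate features, typically a smaller payload than raw sensor data

def run_in_cloud(features):
    for w in weights[split:]:
        features = relu(features @ w)
    return features

sensor_batch = rng.standard_normal((8, 64))
print(run_in_cloud(run_on_edge(sensor_batch)).shape)
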
% This is so near what we want to DO!
% But no timing, and it uses Hadoop (which I'm sure is slow)
% No ML
@INPROCEEDINGS{pi_dist_comp_testbed,
author={X. Wang and S. Jiang and X. Xu and Z. Wu and Y. Tao},
booktitle={2016 6th International Conference on Digital Home (ICDH)},
title={A Raspberry Pi and LXC Based Distributed Computing Testbed},
year={2016},
volume={},
number={},
pages={170-174},
abstract={With the development of the teaching and research of big data processing and distributed algorithms, there emerges an increasing need to leverage low-cost hardware to build the distributed computing test environment for the purpose of the algorithm validation and experiment simulation. In this paper, we propose a distributed computing testbed using the affordable Raspberry Pi board as the basic computing unit. We develop a cluster platform by building and assembling the units, and design a containerized service-computing framework by merging LXC and service computing techniques. Three kinds of services are introduced and configured to implement specific workflow orchestration on demand. For convenience, we develop a visualized user interface to manage cluster, deploy service containers, and manage workflows. We present the containerized service workflow that links the components of data source, distributed data processing and visualization, to validate the effectiveness of the proposed testbed.},
keywords={Big Data;computer science education;distributed processing;user interfaces;virtualisation;workflow management software;distributed data processing;distributed computing testbed;teaching;distributed algorithms;leverage low-cost hardware;distributed computing test environment;affordable Raspberry Pi board;basic computing unit;containerized service-computing framework;service computing techniques;containerized service workflow;Big Data processing teaching;visualized user interface;Containers;Distributed databases;Cloud computing;Data visualization;Processor scheduling;Hardware;Raspberry Pi;LinuX Container (LXC);Distributed computing framework;Containerized services;Cluster},
doi={10.1109/ICDH.2016.044},
ISSN={},
month={Dec},}
% This is closer, I think...
% I've read this: no services, no SB/RB, no in-situ adjustments
@INPROCEEDINGS{embedded_middleware_pi,
author={S. Bhave and M. Tolentino and H. Zhu and J. Sheng},
booktitle={2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)},
title={Embedded Middleware for Distributed Raspberry Pi Device to Enable Big Data Applications},
year={2017},
volume={2},
number={},
pages={103-108},
abstract={Applications making use of embedded systems are anticipated to become extremely important as we advance towards realizing the vision of "Internet of Things" with smart devices such as Raspberry Pi, and compute-anywhere paradigm where principles of distributed systems play pivotal roles. A case we envision here is a distributed network of low powered devices to accomplish various tasks autonomously. Driven by a distributed embedded system architecture, each of the devices can work on independent local data, which is device specific, to perform similar compute tasks simultaneously so a common goal can be achieved. This collaborative problem solving in the embedded setting is similar in concept to the big data paradigm now commonly proposed for commodity hardware and large databases. As the embedded devices become more capable and powerful the two concepts will combine. However, they are currently worlds apart, and thus forms the motivation of our research. In this project, a middleware layer is developed and tested to make the devices work collaboratively on local data within a network of Raspberry Pi devices. The middleware layer splits, distributes, computes and merges the computing tasks to accomplish a shared computing goal while performing the operations locally in a "shared nothing" architecture.},
keywords={Big Data;embedded systems;microcomputers;middleware;embedded middleware;distributed Raspberry Pi device;Big Data applications;embedded systems;Internet of Things;smart devices;compute-anywhere paradigm;distributed systems;distributed network;distributed embedded system architecture;embedded devices;middleware layer;shared nothing architecture;Middleware;Embedded systems;Universal Serial Bus;Computer architecture;Big Data;Servers;Linux;Raspberry Pi;Embedded Middleware},
doi={10.1109/CSE-EUC.2017.204},
ISSN={},
month={July},}
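% The "splits, distributes, computes and merges" flow described in embedded_middleware_pi is
% essentially a scatter/gather pattern. A minimal sketch of that pattern, using a local
% multiprocessing pool as a stand-in for the network of Raspberry Pi workers (my illustration,
% not the authors' middleware):
from multiprocessing import Pool

def compute(chunk):
    # Each "device" works only on its own local slice of the data (shared-nothing).
    return sum(x * x for x in chunk)

def split(data, n_workers):
    step = (len(data) + n_workers - 1) // n_workers
    return [data[i:i + step] for i in range(0, len(data), step)]

if __name__ == "__main__":
    data = list(range(10_000))
    chunks = split(data, n_workers=4)         # split
    with Pool(processes=4) as pool:
        partials = pool.map(compute, chunks)  # distribute + compute
    print(sum(partials))                      # merge
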
% Uses wireless mesh networks, Pi, and distributed processing
% Worth a look for future work, but very basic: uses MPI, and processing is evaluated via iperf
% and some synthetic tests (the NAS Parallel Benchmarks)
@INPROCEEDINGS{wmn_pi_dist_proc,
author={T. Oda and D. Elmazi and T. Ishitaki and A. Barolli and K. Matsuo and L. Barolli},
booktitle={2015 10th International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)},
title={Experimental Results of a Raspberry Pi Based WMN Testbed for Multiple Flows and Distributed Concurrent Processing},
year={2015},
volume={},
number={},
pages={201-206},
abstract={Wireless Mesh Networks (WMNs) are attracting a lot of attention from wireless network researchers, because of their potential use in several fields such as collaborative computing and communications. Considering mobility of the terminals, routing is a key process for operation of WMNs. In this paper, we present the implementation of a testbed for WMNs. We analyze the performance of Optimized Link State Routing (OLSR) protocol and parallel distributed processing in an indoor scenario. For evaluation we considered hop count, delay, jitter and processing time metrics. The experimental results show that the nodes in the testbed were communicating smoothly. The processing time for node 1 is smaller than other nodes.},
keywords={Android (operating system);indoor radio;jitter;parallel processing;routing protocols;telecommunication computing;wireless mesh networks;jitter;delay;time metric processing;hop count evaluation;indoor scenario;parallel distributed processing;OLSR routing protocol;optimized link state routing protocol;wireless mesh networks;multiple flow processing;distributed concurrent processing;Raspberry Pi based WMN testbed;Ad hoc networks;Routing;Wireless sensor networks;Wireless communication;Delays;Network topology;Wireless Mesh Networks;Raspberry Pi;OLSR;Testbed;Distributed Concurrent Processing;Open MPI;Multiple Flows;Indoor Environment},
doi={10.1109/BWCCA.2015.95},
ISSN={},
month={Nov},}
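% The distributed concurrent processing in wmn_pi_dist_proc runs over Open MPI on the mesh.
% A minimal mpi4py sketch of that style of job (my example; the paper uses the NAS Parallel
% Benchmarks, not this code):
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()   # this node's id within the WMN cluster
size = comm.Get_size()   # number of Pi nodes participating

# Each node computes a partial sum over its own slice of the work.
n = 1_000_000
local = sum(i for i in range(rank, n, size))

total = comm.reduce(local, op=MPI.SUM, root=0)
if rank == 0:
    print("total:", total)

# Launched across the mesh with something like (hostfile and script names are hypothetical):
#   mpirun -np 4 --hostfile pis.txt python3 sum.py
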
% Old, but still looks promising
% though just a proof-of-concept work.
% Must check whether the authors continued this
@INPROCEEDINGS{middleware_for_writing,
author={M. Lescisin and Q. H. Mahmoud},
booktitle={2016 IEEE/ACM International Conference on Mobile Software Engineering and Systems (MOBILESoft)},
title={Middleware for Writing Distributed Applications on Physical Computing Devices},
year={2016},
volume={},
number={},
pages={21-22},
abstract={A computer program, at its most basic level is a series of low level processor instructions which are executed sequentially. These instructions take time to execute, thus longer programs have longer execution times. One way to decrease the execution time for a program is to decrease the required time for each instruction. This is called frequency scaling. The disadvantage of frequency scaling is that running a processor at higher speeds causes it to generate more heat and consume more power. The physical properties of transistors also impose limits on how fast a microprocessor can be built. The solution to the problem of frequency scaling is to, instead of decreasing the time to execute an instruction, increase the number of instructions that can be run in a given amount of time, by running these instructions in parallel. This is known as parallel computing, and in this paper we present a solution for using many off-the-shelf computers to build a computing cluster which will accelerate computing performance by running tasks in parallel. To this end, we introduce a middleware for writing distributed applications on physical computing devices, such as the Raspberry Pi computer.},
keywords={computer architecture;middleware;multiprocessing systems;parallel processing;middleware;distributed applications;physical computing;computer program;processor instructions;frequency scaling;microprocessor;parallel instructions;many off-the-shelf computers;Middleware;Parallel processing;Libraries;Switches;Portable computers;Computer architecture;Middleware;physical computing;distributed applications},
doi={10.1109/MobileSoft.2016.020},
ISSN={},
month={May},}
% More complete: uses Docker, Pi, and SVM for distributed data processing
% Must read. Get references from here for related work
% Very, very similar! But it also uses the cloud (DigitalOcean)
% Able to simulate a lot of sensors, and generally checked the cost efficiency of each Pi
% SVM is not really discussed much (probably the same setup as ours)
@INPROCEEDINGS{framework_docker_svm,
author={M. Al-Rakhami and M. Alsahli and M. M. Hassan and A. Alamri and A. Guerrieri and G. Fortino},
booktitle={2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)},
title={Cost Efficient Edge Intelligence Framework Using Docker Containers},
year={2018},
volume={},
number={},
pages={800-807},
abstract={The emergence of edge computing has its basis in the integration of the Internet of Things (IoT) with the Cloud computing. In order to make it possible, management technologies of data centers have to be combined with significantly more limited devices. The Docker technology, which provides a very lightweight and effective virtualization solution, can be utilized to manage, deploy and distribute edge/cloud applications onto clusters (that, in our case, will be composed by lightweight and small board devices-such as Raspberry Pi). We apply this on the human activity identification scenario. These types of edge devices can be very useful especially in cases when the combination of low costs and robustness is desirable due to various reasons and conditions. In our work, we propose and analyze a framework based on the distributed edge/cloud paradigm. It is able to provide an advantageous combination of various benefits and lower costs of data processing performed at the edge instead of central servers. Support Vector Machine (SVM) has been utilized for recognizing human activity via the proposed framework. The results of the use case are presented in detail in this paper along with the simulated experiment.},
keywords={artificial intelligence;cloud computing;Internet of Things;support vector machines;virtualisation;cost efficient edge intelligence framework;Docker containers;edge computing;IoT;Cloud computing;management technologies;data centers;Docker technology;lightweight virtualization solution;edge/cloud applications;lightweight board devices;small board devices;Raspberry Pi;human activity identification scenario;edge devices;distributed edge/cloud paradigm;Internet of Things;Cloud computing;Edge computing;Containers;Servers;Activity recognition;Computational modeling;Edge Intelligence;Edge Computing;Human Activity Recognition;Docker;Containers},
doi={10.1109/DASC/PiCom/DataCom/CyberSciTec.2018.00138},
ISSN={},
month={Aug},}
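% The SVM side of framework_docker_svm is not described in detail (see the note above), but the
% classifier itself is standard. A minimal scikit-learn sketch of SVM-based activity recognition
% on windowed sensor features; this is my assumption of the setup, not the paper's code:
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# Stand-in features: one row per sensor window (e.g., accelerometer statistics),
# labels 0..2 for three hypothetical activities.
rng = np.random.default_rng(0)
X = rng.standard_normal((300, 12))
y = rng.integers(0, 3, size=300)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
clf = make_pipeline(StandardScaler(), SVC(kernel="rbf", C=1.0))
clf.fit(X_train, y_train)
print("accuracy:", clf.score(X_test, y_test))
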
% Has some promise; feasibility experiments
% Also uses Docker and has some tests on overhead and memory
% Serves an Apache httpd webpage and compares Docker, no Docker, Amazon Web Services, and no distribution
@ARTICLE{7867723,
author={Y. Elkhatib and B. Porter and H. B. Ribeiro and M. F. Zhani and J. Qadir and E. Rivière},
journal={IEEE Internet Computing},
title={On Using Micro-Clouds to Deliver the Fog},
year={2017},
volume={21},
number={2},
pages={8-15},
abstract={The cloud is scalable and cost-efficient, but it isn't ideal for hosting all applications. Fog computing proposes an alternative of offloading some computation to the edge. Which applications to offload, where to send them, and when this should occur isn't entirely clear yet due to a lack of understanding of potential edge infrastructures. Through a number of experiments, the authors showcase the feasibility and readiness of micro-clouds formed by collections of Raspberry Pis to host a range of fog applications, particularly for network-constrained environments.},
keywords={cloud computing;Fog computing;microclouds;Raspberry Pi;network-constrained environments;Cloud computing;Servers;Economic indicators;Internet and Web services;Time factors;Memory management;Edge computing;Distributed processing;Internet/Web technologies;fog computing;micro-clouds;cloud computing;Internet of Things;distributed systems},
doi={10.1109/MIC.2017.35},
ISSN={1089-7801},
month={Mar},}
% Mobile edge cloud platform
% Good intro about why there is a need for an edge network
% Goal is to use LTE, but for now they use Wi-Fi
% ThunderX processors, ARM IFC
% Use case is well presented; try to pattern the paper after this
% What are LXC containers? How different are they from Docker, OpenStack?
% I think this is not distributed, just nodes on the edge
@INPROCEEDINGS{MEC-ConPaaS,
author={A. van Kempen and T. Crivat and B. Trubert and D. Roy and G. Pierre},
booktitle={2017 5th IEEE International Conference on Mobile Cloud Computing, Services, and Engineering (MobileCloud)},
title={MEC-ConPaaS: An Experimental Single-Board Based Mobile Edge Cloud},
year={2017},
volume={},
number={},
pages={17-24},
abstract={Cloud infrastructures are extremely flexible and powerful, but their data centers are located very far from the end users. To address the limitations of these systems, many researchers are designing mobile edge clouds which complement traditional clouds with additional resources located in immediate proximity of the end users. However, currently there exists no open-source mobile edge cloud implementation which can easily be deployed over a campus or a city center to support real-world experimentations. We therefore present the design and implementation of MEC-ConPaaS, a mobile-edge cloud platform, which aims to support future research on edge cloud applications and middlewares. The system exploits single-board computers such as Raspberry Pis which are an order of magnitude cheaper than any server machine and much easier to setup in a distributed setting. We demonstrate that these devices are powerful enough to support real cloud applications, and to support further research on these topics.},
keywords={cloud computing;computer centres;middleware;mobile computing;smart phones;MEC-ConPaaS;single-board based mobile edge cloud;cloud infrastructures;data centers;middleware;Raspberry Pi;Cloud computing;Mobile communication;Mobile computing;Computers;Servers;Open source software;Edge computing;mobile edge cloud;platform as a service;Raspberry Pi;Fog computing},
doi={10.1109/MobileCloud.2017.17},
ISSN={},
month={April},}
% Old, but also similar to my work
% The Linux container project LXC (not important here)
% Uses Docker clusters too, and Mesos (a cluster management platform); I think Docker Swarm is similar, but it was not available back then
% OpenStack for memory management?
% Good discussion of the implementation and evaluation
@INPROCEEDINGS{paas_pi_cluster,
author={C. Pahl and S. Helmer and L. Miori and J. Sanin and B. Lee},
booktitle={2016 IEEE 4th International Conference on Future Internet of Things and Cloud Workshops (FiCloudW)},
title={A Container-Based Edge Cloud PaaS Architecture Based on Raspberry Pi Clusters},
year={2016},
volume={},
number={},
pages={117-124},
abstract={Cloud technology is moving towards multi-cloud environments with the inclusion of various devices. Cloud and IoT integration resulting in so-called edge cloud and fog computing has started. This requires the combination of data centre technologies with much more constrained devices, but still using virtualised solutions to deal with scalability, flexibility and multi-tenancy concerns. Lightweight virtualisation solutions do exist for this architectural setting with smaller, but still virtualised devices to provide application and platform technology as services. Containerisation is a solution component for lightweight virtualisation solution. Containers are furthermore relevant for cloud platform concerns dealt with by Platform-as-a-Service (PaaS) clouds like application packaging and orchestration. We demonstrate an architecture for edge cloud PaaS. For edge clouds, application and service orchestration can help to manage and orchestrate applications through containers. In this way, computation can be brought to the edge of the cloud, rather than data from the Internet-of-Things (IoT) to the cloud. We show that edge cloud requirements such as cost-efficiency, low power consumption, and robustness can be met by implementing container and cluster technology on small single-board devices like Raspberry Pis. This architecture can facilitate applications through distributed multi-cloud platforms built from a range of nodes from data centres to small devices, which we refer to as edge cloud. We illustrate key concepts of an edge cloud PaaS and refer to experimental and conceptual work to make that case.},
keywords={cloud computing;computer centres;Internet of Things;microcomputers;power aware computing;software architecture;virtualisation;container-based edge cloud PaaS architecture;Raspberry Pi clusters;cloud technology;multicloud environments;cloud IoT integration;fog computing;data centre technologies;lightweight virtualization solutions;virtualized devices;platform technology as services;containerisation;platform-as-a-service clouds;service orchestration;application orchestration;Internet-of-Things;power consumption;single-board devices;Container;Cluster;Cloud;PaaS;Edge Cloud;Orchestration;Single-board Computer;Raspberry Pi},
doi={10.1109/W-FiCloud.2016.36},
ISSN={},
month={Aug},}
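% The notes above mention Mesos vs. Docker Swarm for managing containers on the Pi cluster.
% For the single-node case, starting a containerized service can be scripted with the Docker SDK
% for Python; a minimal sketch of container-based deployment on one Pi node (my illustration,
% not the paper's orchestration setup; the image choice is a hypothetical ARM-compatible one):
import docker

client = docker.from_env()  # talks to the local Docker daemon on the Pi

# Run an ARM web server image, mapping container port 80 to host port 8080.
container = client.containers.run(
    "arm32v7/httpd:latest",  # hypothetical choice of image
    detach=True,
    ports={"80/tcp": 8080},
    name="edge-web",
)
print(container.status)
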
% This is very interesting: same process as ours, modeling in one place and then fragmenting it
% Need to read more on the "fragmentation" they describe, but so far I can't really see it; also has an aggregator
% Node-RED
% Also cites a lot of things
@INPROCEEDINGS{8029273,
author={R. Jain and S. Tata},
booktitle={2017 IEEE International Conference on Edge Computing (EDGE)},
title={Cloud to Edge: Distributed Deployment of Process-Aware IoT Applications},
year={2017},
volume={},
number={},
pages={182-189},
abstract={The Internet of Things (IoT) integrates a large number of heterogeneous and pervasive objects that continuously generate information about the physical world. These objects, through standard communication protocols and unique addressing schemes provide services to the final users or systems. IoT is envisioned to bring together billions of devices, also denoted as smart objects, by connecting them in an Internet-like structure, allowing them to communicate and exchange information and to enable new forms of interaction among things and people. Given the distributed nature of IoT applications, it is often the case that the application is modeled, developed and tested on each compute node, and connected to other nodes in the application network for achieving the end result. Therefore the deployment of application is also, more or less, done on each individual compute node. In this paper we propose an approach where the IoT application can be modeled in one place, where after modeling, the different pieces of application are annotated with location information, and based on this annotation, the application is decomposed into fragments that are deployed to corresponding individual compute nodes, automatically generating code to remotely connect the application fragments to other application fragments on other compute nodes in the edge or in the cloud.},
keywords={home computing;Internet;Internet of Things;protocols;ubiquitous computing;standard communication protocols;unique addressing schemes;smart objects;Internet-like structure;application network;individual compute node;location information;application fragments;distributed deployment;process-aware IoT applications;heterogeneous objects;pervasive objects;Internet of Things;Temperature sensors;Logic gates;Hospitals;Cloud computing;Computational modeling;Business;Distributed IoT;Internet of Things;Process-Aware Applications;Business Process Management},
doi={10.1109/IEEE.EDGE.2017.32},
ISSN={},
month={June},}
% I think this relates more to the in-situ part, since it is about resource allocation
% Also for micro-datacenters
@INPROCEEDINGS{8029256,
author={J. Xu and B. Palanisamy and H. Ludwig and Q. Wang},
booktitle={2017 IEEE International Conference on Edge Computing (EDGE)},
title={Zenith: Utility-Aware Resource Allocation for Edge Computing},
year={2017},
volume={},
number={},
pages={47-54},
abstract={In the Internet of Things(IoT) era, the demands for low-latency computing for time-sensitive applications (e.g., location-based augmented reality games, real-time smart grid management, real-time navigation using wearables) has been growing rapidly. Edge Computing provides an additional layer of infrastructure to fill latency gaps between the IoT devices and the back-end computing infrastructure. In the edge computing model, small-scale micro-datacenters that represent ad-hoc and distributed collection of computing infrastructure pose new challenges in terms of management and effective resource sharing to achieve a globally efficient resource allocation. In this paper, we propose Zenith, a novel model for allocating computing resources in an edge computing platform that allows service providers to establish resource sharing contracts with edge infrastructure providers apriori. Based on the established contracts, service providers employ a latency-aware scheduling and resource provisioning algorithm that enables tasks to complete and meet their latency requirements. The proposed techniques are evaluated through extensive experiments that demonstrate the effectiveness, scalability and performance efficiency of the proposed model.},
keywords={cloud computing;computer centres;Internet of Things;resource allocation;scheduling;Zenith;utility-aware resource allocation;time-sensitive applications;augmented reality games;real-time smart grid management;real-time navigation;latency gaps;IoT devices;back-end computing infrastructure;edge computing model;globally efficient resource allocation;edge computing platform;service providers;resource sharing contracts;latency-aware scheduling;resource provisioning algorithm;latency requirements;Internet of Things;edge infrastructure providers;Edge computing;Computational modeling;Resource management;Containers;Contracts;Logic gates;Cloud computing;fog computing;edge computing;resource allocation},
doi={10.1109/IEEE.EDGE.2017.15},
ISSN={},
month={June},}
% Also aims at better QoS through task distribution
% A lot of math for the distribution and how it becomes non-linear; possibly useful
@INPROCEEDINGS{8029254,
author={Y. Song and S. S. Yau and R. Yu and X. Zhang and G. Xue},
booktitle={2017 IEEE International Conference on Edge Computing (EDGE)},
title={An Approach to QoS-based Task Distribution in Edge Computing Networks for IoT Applications},
year={2017},
volume={},
number={},
pages={32-39},
abstract={Internet of Things (IoT) is emerging as part of the infrastructures for advancing a large variety of applications involving connection of many intelligent devices, leading to smart communities. Due to the severe limitation on the computing resources of IoT devices, it is common to offload tasks of various applications requiring substantial computing resources to computing systems with sufficient computing resources, such as servers, cloud systems, and/or data centers for processing. However, the offloading method suffers from the difficulties of high latency and network congestion in the IoT infrastructures. Recently edge computing has emerged to reduce the negative impacts of these difficulties. Yet, edge computing has its drawbacks, such as the limited computing resources of some edge computing devices and the unbalanced load among these devices. In order to effectively explore the potential of edge computing to support IoT applications, it is necessary to have efficient task management in edge computing networks. In this paper, an approach is presented for periodically distributing incoming tasks in the edge computing network so that the number of tasks, which can be processed in the edge computing network, is increased, and the quality-of-service (QoS) requirements of the tasks completed in the edge computing network are satisfied. Simulation results are presented to show the improvement of using this approach on the increase of the number of tasks to be completed in the edge computing network.},
keywords={cloud computing;computer networks;Internet of Things;quality of service;resource allocation;edge computing network;task distribution;Internet of Things;IoT;network congestion;unbalanced load;task management;quality-of-service;QoS;edge computing devices;Edge computing;Bandwidth;Security;Quality of service;Cloud computing;Data communication;Optimization;edge computing;task distribution;quality-of-service},
doi={10.1109/IEEE.EDGE.2017.50},
ISSN={},
month={June},}
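% The paper formulates periodic task distribution as an optimization (the note above mentions
% the non-linear math). As a much simpler mental model, a greedy deadline-aware assignment can
% be sketched as follows; this is only a toy illustration, not the paper's algorithm:
def assign_tasks(tasks, nodes):
    """tasks: list of (task_id, work, deadline); nodes: dict node_id -> speed.
    Greedily place each task on the node that finishes it earliest, keeping it
    only if the estimated finish time meets the task's QoS deadline."""
    finish_time = {n: 0.0 for n in nodes}
    placement = {}
    for task_id, work, deadline in sorted(tasks, key=lambda t: t[2]):
        best = min(nodes, key=lambda n: finish_time[n] + work / nodes[n])
        eta = finish_time[best] + work / nodes[best]
        if eta <= deadline:
            placement[task_id] = best
            finish_time[best] = eta
        # else: the task cannot meet QoS in the edge network (e.g., send it to the cloud)
    return placement

print(assign_tasks([("t1", 4, 5), ("t2", 2, 3), ("t3", 6, 6)],
                   {"pi1": 1.0, "pi2": 2.0}))
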
% Readily describes the limitations of existing technology
% Good architecture diagram, I think
% Check the typical application
% Architecture only
@INPROCEEDINGS{8421879,
author={Q. Jin and R. Lin and H. Zou and F. Yang},
booktitle={2018 5th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/2018 4th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)},
title={A Distributed Fog Computing Architecture Supporting Multiple Migrating Mode},
year={2018},
volume={},
number={},
pages={218-223},
abstract={Fog computing is a new computing mode. As a derivative of cloud computing, fog computing can solve the problems of high latency, overloaded center server and overloaded bandwidth of network. There are many kinds of existing fog computing architectures, which lacks flexibility in handling of tasks and interaction of networks. In this paper, based on the advantages and problems of the existing fog computing architectures, we propose a distributed fog computing architecture that supports multiple migrating mode, which include center-edge, edge-edge migrating. The main research contents and contributions of this paper are as follows: (1) Propose a distributed architecture that contains user management node, network management node and service node on center and edge network. (2) Design a scalable distributed computing node model in Edge Network. (3) Carry out a user registration method based on distributed storage. (4) Design a service migration model based on the load of Network, which included the center-edge and edge-edge migrating support.},
keywords={cloud computing;computer centres;computer network management;resource allocation;computing mode;cloud computing;overloaded center server;center-edge;distributed architecture;network management node;service node;edge network;edge-edge migrating support;fog computing architectures;distributed fog computing architecture;multiple migrating mode;Edge computing;Computer architecture;Task analysis;Image edge detection;Cloud computing;Security;Synchronization;fog computing;architecture;distribution},
doi={10.1109/CSCloud/EdgeCom.2018.00046},
ISSN={},
month={June},}
% Edge Mesh provides many benefits, including distributed processing, low latency, fault tolerance, better scalability, better security, and privacy
% Many citations, maybe useful
% Gives references on centralized cloud computing, fog, cooperative computing, etc.
% Also uses wireless mesh networks
% I see data sharing, but where is the distributed processing? --> 3) Load Distribution: computation tasks can be offloaded (this?)
% This seems intense already!?
@ARTICLE{edge_mesh,
author={Y. Sahni and J. Cao and S. Zhang and L. Yang},
journal={IEEE Access},
title={Edge Mesh: A New Paradigm to Enable Distributed Intelligence in Internet of Things},
year={2017},
volume={5},
number={},
pages={16441-16458},
abstract={In recent years, there has been a paradigm shift in Internet of Things (IoT) from centralized cloud computing to edge computing (or fog computing). Developments in ICT have resulted in the significant increment of communication and computation capabilities of embedded devices and this will continue to increase in coming years. However, existing paradigms do not utilize low-level devices for any decision-making process. In fact, gateway devices are also utilized mostly for communication interoperability and some low-level processing. In this paper, we have proposed a new computing paradigm, named Edge Mesh, which distributes the decision-making tasks among edge devices within the network instead of sending all the data to a centralized server. All the computation tasks and data are shared using a mesh network of edge devices and routers. Edge Mesh provides many benefits, including distributed processing, low latency, fault tolerance, better scalability, better security, and privacy. These benefits are useful for critical applications, which require higher reliability, real-time processing, mobility support, and context awareness. We first give an overview of existing computing paradigms to establish the motivation behind Edge Mesh. Then, we describe in detail about the Edge Mesh computing paradigm, including the proposed software framework, research challenges, and benefits of Edge Mesh. We have also described the task management framework and done a preliminary study on task allocation problem in Edge Mesh. Different application scenarios, including smart home, intelligent transportation system, and healthcare, are presented to illustrate the significance of Edge Mesh computing paradigm.},
keywords={cloud computing;embedded systems;Internet of Things;internetworking;mobility management (mobile radio);network servers;open systems;software fault tolerance;intelligent transportation system;healthcare;smart home;task allocation problem;task management framework;software framework;context awareness;mobility support;real-time processing;fault tolerance;distributed processing;centralized server;decision-making tasks;communication interoperability;gateway devices;embedded devices;computation capabilities;communication capabilities;ICT;fog computing;edge computing;centralized cloud computing;IoT;Internet-of-Things;distributed intelligence;edge mesh computing paradigm;Cloud computing;Edge computing;Servers;Resource management;Security;Decision making;Sensors;Edge devices;Internet of Things;distributed intelligence;distributed computing;mesh network},
doi={10.1109/ACCESS.2017.2739804},
ISSN={2169-3536},
month={},}
% From Nakamura's lab in Germany, I think
% Mostly about heterogeneity
@INPROCEEDINGS{7756224,
author={D. Schäfer and J. Edinger and S. VanSyckel and J. M. Paluska and C. Becker},
booktitle={2016 IEEE 36th International Conference on Distributed Computing Systems Workshops (ICDCSW)},
title={Tasklets: Overcoming Heterogeneity in Distributed Computing Systems},
year={2016},
volume={},
number={},
pages={156-161},
abstract={Distributed computing is a good alternative to expensive supercomputers. There are plenty of frameworks that enable programmers to harvest remote computing power. However, until today, much computation power in the edges of the Internet remains unused. While idle devices could contribute to a distributed environment as generic computation resources, computation-intense applications could use this pool of resources to enhance their execution quality. In this paper, we identify heterogeneity as a major burden for distributed and edge computing. Heterogeneity is present in multiple forms. We draw our vision of a comprehensive distributed computing system and show where existing frameworks fall short in dealing with the heterogeneity of distributed computing. Afterwards, we present the Tasklet system, our approach for a distributed computing framework. Tasklets are fine-grained computation units that can be issued for remote and local execution. We tackle the different dimensions of heterogeneity and show how to make use of available computation power in edge resources. In our prototype, we use middleware and virtualization technologies as well as a host language concept.},
keywords={grid computing;Internet;middleware;resource allocation;virtualisation;distributed computing systems;remote computing;Internet;distributed environment;computation-intense applications;edge computing;tasklet system;fine-grained computation units;middleware;virtualization;host language concept;Hardware;Cloud computing;Grid computing;Computer architecture;Operating systems;Distributed Computing;Edge-centric Computing;Heterogeneity;Resource Sharing;Grid Computing},
doi={10.1109/ICDCSW.2016.22},
ISSN={2332-5666},
month={June},}
% Just a reference for the virtualization; also look for the reference for the Pi and Docker!
@INPROCEEDINGS{7092949,
author={R. Morabito and J. Kjällman and M. Komu},
booktitle={2015 IEEE International Conference on Cloud Engineering},
title={Hypervisors vs. Lightweight Virtualization: A Performance Comparison},
year={2015},
volume={},
number={},
pages={386-393},
abstract={Virtualization of operating systems provides a common way to run different services in the cloud. Recently, the lightweight virtualization technologies claim to offer superior performance. In this paper, we present a detailed performance comparison of traditional hypervisor based virtualization and new lightweight solutions. In our measurements, we use several benchmarks tools in order to understand the strengths, weaknesses, and anomalies introduced by these different platforms in terms of processing, storage, memory and network. Our results show that containers achieve generally better performance when compared with traditional virtual machines and other recent solutions. Albeit containers offer clearly more dense deployment of virtual machines, the performance difference with other technologies is in many cases relatively small.},
keywords={operating systems (computers);virtual machines;virtualisation;hypervisors;lightweight virtualization technologies;operating systems;benchmarks tools;strengths;weaknesses;anomalies;processing;storage;memory;network;virtual machines;Albeit containers;Containers;Virtual machine monitors;Virtualization;Linux;Benchmark testing;Operating systems;Performance; Benchmarking; Virtualization; Hypervisor; Container},
doi={10.1109/IC2E.2015.74},
ISSN={},
month={March},}