add cinit-0.3pre15
Signed-off-by: Nico Schottelius <nico@ikn.schottelius.org>
This commit is contained in: parent 36de902919, commit 440caeb555
1013 changed files with 99995 additions and 0 deletions

software/cinit/browse_source/cinit-0.3pre15/doc/devel/ipc.text (new file, 169 lines)
@@ -0,0 +1,169 @@
IPC - in cinit and in general
=============================
Nico Schottelius <nico-linux-cinit__@__schottelius.org>
0.1, Initial Version from 2006-07-09
:Author Initials: NS

IPC - Inter process communication

Introduction
------------
This document describes the IPC methods used and/or tested for
cinit.

It does not describe in detail how the different methods work
(this has already been done many times and there is great documentation
available online), but rather their advantages and disadvantages
(especially for an init system).

What is IPC?
~~~~~~~~~~~~
IPC describes methods to communicate between different processes
(programs).

IPC as described by SUSv3 (The Single UNIX Specification Version 3),
aka IEEE Std 1003.1, 2004 Edition, aka POSIX, only defines
message queues (MSQ), shared memory (SHM) and semaphores as IPC.
This document also covers sockets and FIFOs.


What is not (yet) covered by this document?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The speed of the different IPC methods and their different behaviour
on different Unices.


Why do you need IPC for an init system?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


cinit IPC history + analysis
----------------------------


FIFOs
~~~~~
- First in, first out
- A file on the filesystem
- You always need two FIFOs for two-way communication

Hints
^^^^^
The first idea for IPC in cinit was to use two FIFOs, like minit does.
This turned out to be the wrong assumption:
runit also uses FIFOs, but two FIFOs for each service. That way
more parallel transmissions are possible.
Pay attention to the maximum number of open files!
(On Linux 2.6 this is 1024, which makes a maximum of 512 services.
This limit is most likely never reached, but you have to keep it
in mind.)

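To make the per-service FIFO approach concrete, here is a minimal sketch
(not cinit code) that creates and opens a FIFO pair for one service; the
directory layout, file names and open flags are assumptions for this example.

/* Sketch only: a per-service FIFO pair, as runit-style IPC would need it.
 * Paths and naming are hypothetical, not taken from cinit. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

/* Create <dir>/in and <dir>/out and open them; returns 1 on success, 0 on failure. */
static int open_service_fifos(const char *dir, int *cmd_fd, int *answer_fd)
{
   char in_path[256], out_path[256];

   snprintf(in_path,  sizeof(in_path),  "%s/in",  dir);
   snprintf(out_path, sizeof(out_path), "%s/out", dir);

   /* EEXIST is fine: the FIFOs may already exist from an earlier run. */
   if (mkfifo(in_path, 0600) == -1 && errno != EEXIST) return 0;
   if (mkfifo(out_path, 0600) == -1 && errno != EEXIST) return 0;

   /* Two open fds per service: this is where the fd limit (1024 by default
    * on Linux 2.6) turns into a limit of roughly 512 services. */
   *cmd_fd    = open(in_path,  O_RDONLY | O_NONBLOCK);
   *answer_fd = open(out_path, O_RDWR);   /* on Linux, O_RDWR on a FIFO does not block */

   return (*cmd_fd != -1 && *answer_fd != -1);
}
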
Sockets
~~~~~~~
Clean and beautiful. They allow easy two-way communication.

If the filesystem is read-only when you want to create the socket,
you cannot use it, although there is SO_REUSE.

First method: put the socket on a memory-backed part of the filesystem (tmpfs).
Second method: use internal communication (pipes!) first and external
communication after /etc/cinit/ becomes writable.


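As an illustration of the first method, here is a sketch of the server side
of a Unix domain socket placed on a tmpfs mount, so it can be created even
while the root filesystem is still read-only; the path is a made-up example,
not the one cinit uses.

/* Sketch only: create and bind a Unix domain socket on tmpfs. */
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#define IPC_SOCKET_PATH "/dev/shm/cinit-ipc.sock"   /* hypothetical path */

static int ipc_socket_create(void)
{
   struct sockaddr_un addr;
   int fd;

   fd = socket(AF_UNIX, SOCK_STREAM, 0);
   if (fd == -1) return -1;

   memset(&addr, 0, sizeof(addr));
   addr.sun_family = AF_UNIX;
   strncpy(addr.sun_path, IPC_SOCKET_PATH, sizeof(addr.sun_path) - 1);

   unlink(IPC_SOCKET_PATH);   /* remove a stale socket from a previous run */

   if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1 ||
       listen(fd, 5) == -1) {
      close(fd);
      return -1;
   }

   return fd;
}
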
Current IPC configuration
-------------------------

- switchable (conf/ipc_method)
- each ipc implementation needs:

   ipc.h -> for global variables and ipc specific things;
            prefix variables with ipc_
   int cinit_ipc_init(void);   -> general initialization;
                                  returns 1 on success, 0 on failure
   int cinit_ipc_listen(void); -> begin to listen for messages
   int cinit_ipc_send(void *data); -> send data to a client

Abstraction layer: cinit_ipc_*
------------------------------
You can choose or even reimplement the IPC code for cinit. You only have to
create a directory below src/ipc/ and implement the following functions:

int cinit_ipc_init(void);
~~~~~~~~~~~~~~~~~~~~~~~~~
Initialise the IPC functions in cinit.


int cinit_ipc_listen(void);
~~~~~~~~~~~~~~~~~~~~~~~~~~~
A never-ending loop that listens for commands and passes each
retrieved command to read_command().

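To make that contract concrete, here is a sketch of what a cinit_ipc_listen()
implementation could look like for a socket backend; ipc_server_fd, the fixed
buffer size and the exact signature of read_command() are assumptions for
this example, not taken from the cinit sources.

/* Sketch only: a possible cinit_ipc_listen() for a socket backend. */
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

extern int ipc_server_fd;              /* would live in ipc.h (ipc_ prefix) */
extern void read_command(char *buf);   /* dispatches one received command */

int cinit_ipc_listen(void)
{
   char buf[512];
   int client;
   ssize_t len;

   for (;;) {                          /* never returns in normal operation */
      client = accept(ipc_server_fd, NULL, NULL);
      if (client == -1)
         continue;

      len = read(client, buf, sizeof(buf) - 1);
      if (len > 0) {
         buf[len] = '\0';
         read_command(buf);            /* hand the command to cinit */
      }
      close(client);
   }
   return 1;                           /* not reached */
}
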
other
~~~~~
int cinit_ipc_sclose(void);      /* forks of cinit come from outside! */
int cinit_ipc_ssend(void *data); /* send to a client from the server */
void cinit_ipc_destroy(void);    /* destroy the ipc handler in cinit */

cinit_get_data(int ident, int size, void *data) => read size bytes from the
                                                   client identified by ident

/*****************************************************************************
 * Functions: in clients
 */
int cinit_ipc_logon(void);   /* logon to init (client init) */
int cinit_ipc_connect(void); /* connect to init */
int cinit_ipc_csend(struct cinit_message *data); /* send to the server from a client */

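Put together, the ipc.h of a new backend could look roughly like the
following; the struct layout, the return type of cinit_get_data() and
everything not named above are assumptions meant only to show how the
pieces fit.

/* Sketch only: how src/ipc/<method>/ipc.h could collect the interface
 * described above. The struct members are placeholders, not cinit's layout. */
#ifndef CINIT_IPC_H
#define CINIT_IPC_H

struct cinit_message {       /* placeholder: the real layout is defined by cinit */
   int  type;
   int  opt;
   char svc[256];
};

/* server side (inside cinit) */
int  cinit_ipc_init(void);                 /* returns 1 on success, 0 on failure */
int  cinit_ipc_listen(void);               /* loops forever, feeds read_command() */
int  cinit_ipc_send(void *data);           /* send data to a client */
int  cinit_ipc_sclose(void);               /* close the server side */
int  cinit_ipc_ssend(void *data);          /* send to a client from the server */
void cinit_ipc_destroy(void);              /* destroy the ipc handler in cinit */
int  cinit_get_data(int ident, int size, void *data); /* return type assumed */

/* client side */
int  cinit_ipc_logon(void);                /* logon to init */
int  cinit_ipc_connect(void);              /* connect to init */
int  cinit_ipc_csend(struct cinit_message *data);     /* send to the server */

/* backend-private globals should be prefixed with ipc_ */

#endif /* CINIT_IPC_H */
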
--------------------------------------------------------------------------------
Messages:

struct cinit_question qsn;
struct cinit_answer   asr;

question:

answer:

int cinit_send_to(struct cinit_question *data, struct cinit_answer *res)
   -> return 0 on ipc errors

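A hypothetical client-side round trip with these types could look like the
sketch below; the struct members and the question code used here are invented
for the example, since the document leaves them open.

/* Sketch only: one question/answer round trip from a client.
 * Member names and codes are placeholders; cinit defines the real ones. */
#include <stdio.h>
#include <string.h>

struct cinit_question { int type; char svc[256]; };   /* placeholder layout */
struct cinit_answer   { int code; int opt; };         /* placeholder layout */

/* provided by the IPC abstraction layer; returns 0 on ipc errors */
extern int cinit_send_to(struct cinit_question *data, struct cinit_answer *res);

int query_service(const char *svc)
{
   struct cinit_question qsn;
   struct cinit_answer   asr;

   memset(&qsn, 0, sizeof(qsn));
   qsn.type = 1;                                   /* placeholder question code */
   strncpy(qsn.svc, svc, sizeof(qsn.svc) - 1);

   if (!cinit_send_to(&qsn, &asr)) {               /* 0 means an ipc error */
      fprintf(stderr, "ipc error talking to cinit\n");
      return 0;
   }

   printf("answer code %d (opt %d)\n", asr.code, asr.opt);
   return 1;
}
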
--------------------------------------------------------------------------------
enable / disable services:

- send svc
- send flags

- recv return:
   CINIT_ASW_SVC_STOPPED: successfully stopped the service

   CINIT_ASW_SVC_ERR:     failed to stop the service
   CINIT_ASW_SVC_WANTS:   wants failed
   CINIT_ASW_SVC_NEEDS:   needs failed
      opt contains the number of failed services;
      retrieve them from cinit after that

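As an illustration of how a client might react to these answers, here is a
sketch reusing the placeholder answer struct from above; the enum values and
field names are assumptions, not cinit's real definitions.

/* Sketch only: interpreting the answer to an enable/disable request. */
#include <stdio.h>

enum {                      /* placeholder values */
   CINIT_ASW_SVC_STOPPED = 0,
   CINIT_ASW_SVC_ERR,
   CINIT_ASW_SVC_WANTS,
   CINIT_ASW_SVC_NEEDS
};

struct cinit_answer { int code; int opt; };   /* placeholder layout */

void handle_stop_answer(const struct cinit_answer *asr)
{
   switch (asr->code) {
   case CINIT_ASW_SVC_STOPPED:
      printf("service stopped\n");
      break;
   case CINIT_ASW_SVC_ERR:
      printf("failed to stop the service\n");
      break;
   case CINIT_ASW_SVC_WANTS:
   case CINIT_ASW_SVC_NEEDS:
      /* opt contains the number of failed services; a real client would
       * now retrieve the list of those services from cinit. */
      printf("%d dependencies failed\n", asr->opt);
      break;
   }
}
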
--------------------------------------------------------------------------------
TO SORT:

1. message queues
   o clients write into it
   o is the data packet large enough for the answer?

2. shared memory
   o the client can read directly what has to be started
   o the client can start dependencies directly, but has to
     register them

To try out:
- mutexes
- message queues
-

Ideas:
Message queue
1. the client goes to the normal queue
   with id = pid (cast!)
   then back via another queue

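The message-queue idea above could be sketched with SysV message queues as
follows; the keys, the payload size and the helper name are invented for this
example, and the "id = pid" note is interpreted as using the client's pid as
the message type.

/* Sketch only: request on one queue, reply on another, selected by pid. */
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <unistd.h>

#define REQ_KEY 0x43494e31   /* hypothetical keys for the two queues */
#define ASW_KEY 0x43494e32

struct qmsg {
   long mtype;               /* the client's pid, cast to long */
   char text[128];           /* must be large enough for the answer, too */
};

int mq_roundtrip(const char *request, char *answer, size_t answer_len)
{
   int req_q = msgget(REQ_KEY, IPC_CREAT | 0600);
   int asw_q = msgget(ASW_KEY, IPC_CREAT | 0600);
   struct qmsg msg;

   if (req_q == -1 || asw_q == -1)
      return 0;

   msg.mtype = (long) getpid();                 /* "id = pid (cast!)" */
   strncpy(msg.text, request, sizeof(msg.text) - 1);
   msg.text[sizeof(msg.text) - 1] = '\0';

   if (msgsnd(req_q, &msg, sizeof(msg.text), 0) == -1)
      return 0;

   /* the reply comes back on the other queue, addressed to our pid */
   if (msgrcv(asw_q, &msg, sizeof(msg.text), (long) getpid(), 0) == -1)
      return 0;

   strncpy(answer, msg.text, answer_len - 1);
   answer[answer_len - 1] = '\0';
   return 1;
}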