ios - Audio Recording AudioQueueStart buffer never filled


I am using AudioQueueStart in order to start recording on an iOS device, and I want the recording data streamed to me in buffers so that I can process them and send them to a server.

The basic functionality works great, but in my BufferFilled function I get less than 10 bytes of data on every call. That feels inefficient, especially since I have tried to set the buffer size to 16384 bytes (see the beginning of the startRecording method).

How can I make it fill the buffer more before calling BufferFilled? Or do I need a second phase of buffering before sending to the server to achieve what I want? (A sketch of that idea follows the code below.)

OSStatus BufferFilled(void *aqData, SInt64 inPosition, UInt32 requestCount, const void *inBuffer, UInt32 *actualCount) {
    AQRecorderState *pAqData = (AQRecorderState *)aqData;

    NSData *audioData = [NSData dataWithBytes:inBuffer length:requestCount];

    // Report the whole requested chunk as written so the file API keeps going.
    *actualCount = requestCount;

    // audioData is usually < 10 bytes, sometimes ~100 bytes, never close to 16384 bytes

    return 0;
}

void HandleInputBuffer(void *aqData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime, UInt32 inNumPackets, const AudioStreamPacketDescription *inPacketDesc) {
    AQRecorderState *pAqData = (AQRecorderState *)aqData;

    // For CBR formats the packet count can be derived from the byte size.
    if (inNumPackets == 0 && pAqData->mDataFormat.mBytesPerPacket != 0)
        inNumPackets = inBuffer->mAudioDataByteSize / pAqData->mDataFormat.mBytesPerPacket;

    if (AudioFileWritePackets(pAqData->mAudioFile, false, inBuffer->mAudioDataByteSize, inPacketDesc, pAqData->mCurrentPacket, &inNumPackets, inBuffer->mAudioData) == noErr) {
        pAqData->mCurrentPacket += inNumPackets;
    }

    if (pAqData->mIsRunning == 0)
        return;

    // Hand the buffer back to the queue so it can be filled again.
    OSStatus error = AudioQueueEnqueueBuffer(pAqData->mQueue, inBuffer, 0, NULL);
}

void DeriveBufferSize(AudioQueueRef audioQueue, AudioStreamBasicDescription *ASBDescription, Float64 seconds, UInt32 *outBufferSize) {
    static const int maxBufferSize = 0x50000;

    int maxPacketSize = ASBDescription->mBytesPerPacket;
    if (maxPacketSize == 0) {
        // VBR format: ask the queue for an upper bound on the packet size.
        UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
        AudioQueueGetProperty(audioQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize, &maxVBRPacketSize);
    }

    Float64 numBytesForTime = ASBDescription->mSampleRate * maxPacketSize * seconds;
    *outBufferSize = (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
}

OSStatus SetMagicCookieForFile(AudioQueueRef inQueue, AudioFileID inFile) {
    OSStatus result = noErr;
    UInt32 cookieSize;

    if (AudioQueueGetPropertySize(inQueue, kAudioQueueProperty_MagicCookie, &cookieSize) == noErr) {
        char *magicCookie = (char *)malloc(cookieSize);
        if (AudioQueueGetProperty(inQueue, kAudioQueueProperty_MagicCookie, magicCookie, &cookieSize) == noErr)
            result = AudioFileSetProperty(inFile, kAudioFilePropertyMagicCookieData, cookieSize, magicCookie);
        free(magicCookie);
    }
    return result;
}

- (void)startRecording {
    aqData.mDataFormat.mFormatID         = kAudioFormatMPEG4AAC;
    aqData.mDataFormat.mSampleRate       = 22050.0;
    aqData.mDataFormat.mChannelsPerFrame = 1;
    aqData.mDataFormat.mBitsPerChannel   = 0;
    aqData.mDataFormat.mBytesPerPacket   = 0;
    aqData.mDataFormat.mBytesPerFrame    = 0;
    aqData.mDataFormat.mFramesPerPacket  = 1024;
    aqData.mDataFormat.mFormatFlags      = kMPEG4Object_AAC_Main;
    AudioFileTypeID fileType             = kAudioFileAAC_ADTSType;
    aqData.bufferByteSize                = 16384;

    UInt32 defaultToSpeaker = true;
    AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, sizeof(defaultToSpeaker), &defaultToSpeaker);

    OSStatus status = AudioQueueNewInput(&aqData.mDataFormat, HandleInputBuffer, &aqData, NULL, kCFRunLoopCommonModes, 0, &aqData.mQueue);

    // Read back the filled-in stream description from the queue.
    UInt32 dataFormatSize = sizeof(aqData.mDataFormat);
    status = AudioQueueGetProperty(aqData.mQueue, kAudioQueueProperty_StreamDescription, &aqData.mDataFormat, &dataFormatSize);

    // Write via callbacks (BufferFilled) instead of to a file on disk.
    status = AudioFileInitializeWithCallbacks(&aqData, NULL, BufferFilled, NULL, NULL, fileType, &aqData.mDataFormat, 0, &aqData.mAudioFile);

    for (int i = 0; i < kNumberBuffers; ++i) {
        status = AudioQueueAllocateBuffer(aqData.mQueue, aqData.bufferByteSize, &aqData.mBuffers[i]);
        status = AudioQueueEnqueueBuffer(aqData.mQueue, aqData.mBuffers[i], 0, NULL);
    }

    aqData.mCurrentPacket = 0;
    aqData.mIsRunning = true;

    status = AudioQueueStart(aqData.mQueue, NULL);
}
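For what it's worth, the second phase of buffering asked about above could be a small accumulator in front of the upload: BufferFilled appends each chunk to an NSMutableData and only hands it off once a threshold is reached. This is only a sketch under assumptions: the pendingData field and SendToServer() function are hypothetical names, not part of the code above, and an Objective-C pointer inside the AQRecorderState struct assumes manual reference counting (under ARC it would need special handling).

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

// Hypothetical uploader; stands in for whatever networking call is used.
extern void SendToServer(NSData *chunk);

// Flush to the server once this much audio has accumulated.
static const NSUInteger kSendThreshold = 16384;

OSStatus BufferFilled(void *aqData, SInt64 inPosition, UInt32 requestCount, const void *inBuffer, UInt32 *actualCount) {
    AQRecorderState *pAqData = (AQRecorderState *)aqData;

    // Accumulate the small chunks instead of shipping each one individually.
    // pendingData is a hypothetical NSMutableData field added to AQRecorderState.
    [pAqData->pendingData appendBytes:inBuffer length:requestCount];

    if (pAqData->pendingData.length >= kSendThreshold) {
        SendToServer(pAqData->pendingData);
        [pAqData->pendingData setLength:0];
    }

    *actualCount = requestCount;
    return 0;
}

Since the chunks arrive in file order and an ADTS file has no global header that gets rewritten later (unlike .m4a), concatenating the uploaded chunks on the server should reproduce the ADTS stream exactly.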

Update: I have logged the data I receive, and it is quite interesting: it seems that half of the "packets" are some kind of header and half are sound data. Is that how AAC encoding on iOS works, writing the header in one buffer and the data in the next one, and so on? And it never wants more than around 170-180 bytes for each data chunk, which is why it ignores my large buffer?
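That reading is consistent with ADTS framing: kAudioFileAAC_ADTSType prefixes every AAC packet with a 7-byte ADTS header, and a 22.05 kHz mono AAC packet is typically only a couple of hundred bytes, so the write callback never needs anywhere near 16384 bytes at once. One way to verify this (a sketch, assuming the standard ADTS header layout) is to check incoming chunks for the 0xFFF syncword and read out the 13-bit frame length:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Returns true if buf starts with an ADTS header and writes the total frame
// length (7-byte header plus AAC payload) to outFrameLength.
static bool ParseADTSFrameLength(const uint8_t *buf, size_t len, size_t *outFrameLength) {
    if (len < 7)
        return false;
    if (buf[0] != 0xFF || (buf[1] & 0xF0) != 0xF0)   // 12-bit syncword 0xFFF
        return false;
    // aac_frame_length is 13 bits spanning header bytes 3, 4, and 5.
    *outFrameLength = ((size_t)(buf[3] & 0x03) << 11)
                    | ((size_t)buf[4] << 3)
                    | ((size_t)(buf[5] & 0xE0) >> 5);
    return true;
}

If the ~170-180-byte chunks parse as whole frames, or the tiny chunks turn out to be bare 7-byte headers for the payload that follows, that would confirm the callback is simply driven once per packet write, and the audio queue's buffer size has no influence on it.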

